diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile new file mode 100644 index 000000000..4234de160 --- /dev/null +++ b/.ci/Jenkinsfile @@ -0,0 +1,81 @@ +def golang = ['1.23', '1.24'] +def golangDefault = "golang:${golang.last()}" + +async { + + for (version in golang) { + def go = version + + task("test/go${go}") { + container("golang:${go}") { + sh 'make test' + } + } + + task("build/go${go}") { + container("golang:${go}") { + for (app in ['cli', 'node', 'ir', 'adm', 'lens']) { + sh """ + make bin/frostfs-${app} + bin/frostfs-${app} --version + """ + } + } + } + } + + task('test/race') { + container(golangDefault) { + sh 'make test GOFLAGS="-count=1 -race"' + } + } + + task('lint') { + container(golangDefault) { + sh 'make lint-install lint' + } + } + + task('staticcheck') { + container(golangDefault) { + sh 'make staticcheck-install staticcheck-run' + } + } + + task('gopls') { + container(golangDefault) { + sh 'make gopls-install gopls-run' + } + } + + task('gofumpt') { + container(golangDefault) { + sh ''' + make fumpt-install + make fumpt + git diff --exit-code --quiet + ''' + } + } + + task('vulncheck') { + container(golangDefault) { + sh ''' + go install golang.org/x/vuln/cmd/govulncheck@latest + govulncheck ./... + ''' + } + } + + task('pre-commit') { + dockerfile(""" + FROM ${golangDefault} + RUN apt update && \ + apt install -y --no-install-recommends pre-commit + """) { + withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { + sh 'pre-commit run --color=always --hook-stage=manual --all-files' + } + } + } +} diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm index b3dad06d3..42aeebc48 100644 --- a/.docker/Dockerfile.adm +++ b/.docker/Dockerfile.adm @@ -1,4 +1,4 @@ -FROM golang:1.22 as builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository diff --git a/.docker/Dockerfile.ci b/.docker/Dockerfile.ci index e9077c831..9ddd8de59 100644 --- a/.docker/Dockerfile.ci +++ b/.docker/Dockerfile.ci @@ -1,4 +1,4 @@ -FROM golang:1.22 +FROM golang:1.23 WORKDIR /tmp diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli index 5adedc140..16f130056 100644 --- a/.docker/Dockerfile.cli +++ b/.docker/Dockerfile.cli @@ -1,4 +1,4 @@ -FROM golang:1.22 as builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir index 25025bb2f..c119f8127 100644 --- a/.docker/Dockerfile.ir +++ b/.docker/Dockerfile.ir @@ -1,4 +1,4 @@ -FROM golang:1.22 as builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage index a16005516..854f7adea 100644 --- a/.docker/Dockerfile.storage +++ b/.docker/Dockerfile.storage @@ -1,4 +1,4 @@ -FROM golang:1.22 as builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md similarity index 100% rename from .github/ISSUE_TEMPLATE/bug_report.md rename to .forgejo/ISSUE_TEMPLATE/bug_report.md diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.forgejo/ISSUE_TEMPLATE/config.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/config.yml rename to .forgejo/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.forgejo/ISSUE_TEMPLATE/feature_request.md similarity index 100% rename from .github/ISSUE_TEMPLATE/feature_request.md rename to .forgejo/ISSUE_TEMPLATE/feature_request.md diff --git 
a/.github/logo.svg b/.forgejo/logo.svg similarity index 100% rename from .github/logo.svg rename to .forgejo/logo.svg diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml index 86943fe88..d568b9607 100644 --- a/.forgejo/workflows/build.yml +++ b/.forgejo/workflows/build.yml @@ -1,6 +1,10 @@ name: Build -on: [pull_request] +on: + pull_request: + push: + branches: + - master jobs: build: @@ -8,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.21', '1.22' ] + go_versions: [ '1.23', '1.24' ] steps: - uses: actions/checkout@v3 diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml index 7c5af8410..190d7764a 100644 --- a/.forgejo/workflows/dco.yml +++ b/.forgejo/workflows/dco.yml @@ -13,7 +13,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' - name: Run commit format checker uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml new file mode 100644 index 000000000..fe91d65f9 --- /dev/null +++ b/.forgejo/workflows/oci-image.yml @@ -0,0 +1,28 @@ +name: OCI image + +on: + push: + workflow_dispatch: + +jobs: + image: + name: Build container images + runs-on: docker + container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm + steps: + - name: Clone git repo + uses: actions/checkout@v3 + + - name: Build OCI image + run: make images + + - name: Push image to OCI registry + run: | + echo "$REGISTRY_PASSWORD" \ + | docker login --username truecloudlab --password-stdin git.frostfs.info + make push-images + if: >- + startsWith(github.ref, 'refs/tags/v') && + (github.event_name == 'workflow_dispatch' || github.event_name == 'push') + env: + REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}} diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml index 117cda93b..c2e293175 100644 --- a/.forgejo/workflows/pre-commit.yml +++ b/.forgejo/workflows/pre-commit.yml @@ -1,5 +1,10 @@ name: Pre-commit hooks -on: [pull_request] + +on: + pull_request: + push: + branches: + - master jobs: precommit: @@ -16,7 +21,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.22 + go-version: 1.24 - name: Set up Python run: | apt update diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml index b0c9adbf2..f3f5432ce 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -1,5 +1,10 @@ name: Tests and linters -on: [pull_request] + +on: + pull_request: + push: + branches: + - master jobs: lint: @@ -11,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' cache: true - name: Install linters @@ -25,7 +30,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.21', '1.22' ] + go_versions: [ '1.23', '1.24' ] fail-fast: false steps: - uses: actions/checkout@v3 @@ -48,7 +53,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.21' + go-version: '1.24' cache: true - name: Run tests @@ -63,7 +68,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' cache: true - name: Install staticcheck @@ -81,7 +86,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.21' + go-version: '1.22' cache: true - name: Install gopls @@ -89,3 +94,23 @@ jobs: - name: Run gopls run: make gopls-run + + fumpt: + name: Run gofumpt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 
+ + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: '1.24' + cache: true + + - name: Install gofumpt + run: make fumpt-install + + - name: Run gofumpt + run: | + make fumpt + git diff --exit-code --quiet diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 3af564c4b..bc94792d8 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -1,5 +1,10 @@ name: Vulncheck -on: [pull_request] + +on: + pull_request: + push: + branches: + - master jobs: vulncheck: @@ -13,7 +18,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' + check-latest: true - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/.golangci.yml b/.golangci.yml index d209693aa..e3ec09f60 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,86 +1,107 @@ -# This file contains all available configuration options -# with their default values. - -# options for analysis running +version: "2" run: - # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 20m - - # include test files or not, default is true tests: false - -# output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: tab - -# all available settings of specific linters -linters-settings: - exhaustive: - # indicates that switch statements are to be considered exhaustive if a - # 'default' case is present, even if all enum members aren't listed in the - # switch - default-signifies-exhaustive: true - govet: - # report about shadowed variables - check-shadowing: false - staticcheck: - checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed. 
- funlen: - lines: 80 # default 60 - statements: 60 # default 40 - gocognit: - min-complexity: 40 # default 30 - importas: - no-unaliased: true - no-extra-aliases: false - alias: - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object - alias: objectSDK - custom: - truecloudlab-linters: - path: bin/linters/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - target-methods : ["reportFlushError", "reportError"] - disable-packages: ["codes", "err", "res","exec"] - constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - + formats: + tab: + path: stdout + colors: false linters: + default: none enable: - # mandatory linters - - govet - - revive - - # some default golangci-lint linters - - errcheck - - gosimple - - godot - - ineffassign - - staticcheck - - typecheck - - unused - - # extra linters - bidichk - - durationcheck - - exhaustive - - exportloopref - - gofmt - - goimports - - misspell - - predeclared - - reassign - - whitespace - containedctx + - contextcheck + - copyloopvar + - durationcheck + - errcheck + - exhaustive - funlen - gocognit - - contextcheck + - gocritic + - godot - importas - - truecloudlab-linters + - ineffassign + - intrange + - misspell - perfsprint - - testifylint + - predeclared - protogetter - disable-all: true - fast: false + - reassign + - revive + - staticcheck + - testifylint + - truecloudlab-linters + - unconvert + - unparam + - unused + - usetesting + - whitespace + settings: + exhaustive: + default-signifies-exhaustive: true + funlen: + lines: 80 + statements: 60 + gocognit: + min-complexity: 40 + gocritic: + disabled-checks: + - ifElseChain + importas: + alias: + - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object + alias: objectSDK + no-unaliased: true + no-extra-aliases: false + staticcheck: + checks: + - all + - -QF1002 + unused: + field-writes-are-uses: false + exported-fields-are-used: false + local-variables-are-used: false + custom: + truecloudlab-linters: + path: bin/linters/external_linters.so + original-url: git.frostfs.info/TrueCloudLab/linters.git + settings: + noliteral: + constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs + disable-packages: + - codes + - err + - res + - exec + target-methods: + - reportFlushError + - reportError + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gci + - gofmt + - goimports + settings: + gci: + sections: + - standard + - default + custom-order: true + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/.woodpecker/pre-commit.yml b/.woodpecker/pre-commit.yml deleted file mode 100644 index bdf3402de..000000000 --- a/.woodpecker/pre-commit.yml +++ /dev/null @@ -1,11 +0,0 @@ -pipeline: - # Kludge for non-root containers under WoodPecker - fix-ownership: - image: alpine:latest - commands: chown -R 1234:1234 . 
- - pre-commit: - image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36 - commands: - - export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)" - - pre-commit run --hook-stage manual diff --git a/CHANGELOG.md b/CHANGELOG.md index e4ba6a5d6..92c84ab16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,30 @@ Changelog for FrostFS Node ### Removed ### Updated +## [v0.44.0] - 2024-11-25 - Rongbuk + +### Added +- Allow to prioritize nodes during GET traversal via attributes (#1439) +- Add metrics for the frostfsid cache (#1464) +- Customize constant attributes attached to every tracing span (#1488) +- Manage additional keys in the `frostfsid` contract (#1505) +- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519) + +### Changed +- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396) +- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515) + +### Fixed +- Fix EC object search (#1408) +- Fix EC object put when one of the nodes is unavailable (#1427) + +### Removed +- Drop most of the eACL-related code (#1425) +- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483) + +### Upgrading from v0.43.0 +The metabase schema has changed completely, resync is required. + ## [v0.42.0] ### Added diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..d19c96a5c --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers +.forgejo/.* @potyarkin +Makefile @potyarkin diff --git a/Makefile b/Makefile index c93d06aa8..575eaae6f 100755 --- a/Makefile +++ b/Makefile @@ -1,23 +1,23 @@ #!/usr/bin/make -f SHELL = bash +.SHELLFLAGS = -euo pipefail -c REPO ?= $(shell go list -m) VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") -HUB_IMAGE ?= truecloudlab/frostfs +HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" -GO_VERSION ?= 1.22 -LINT_VERSION ?= 1.56.1 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.5 +GO_VERSION ?= 1.23 +LINT_VERSION ?= 2.0.2 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 PROTOC_VERSION ?= 25.0 -PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf) -PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2) +PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 ifeq ($(shell uname), Linux) PROTOC_OS_VERSION=linux-x86_64 endif -STATICCHECK_VERSION ?= 2023.1.6 +STATICCHECK_VERSION ?= 2025.1.1 ARCH = amd64 BIN = bin @@ -28,25 +28,22 @@ DIRS = $(BIN) $(RELEASE) CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*))) BINS = $(addprefix $(BIN)/, $(CMDS)) -# .deb package versioning -OS_RELEASE = $(shell lsb_release -cs) -PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \ - sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \ - sed "s/-/~/")-${OS_RELEASE} - OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION) TMP_DIR := .cache PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION) -PROTOC_GEN_GO_DIR ?= $(PROTOBUF_DIR)/protoc-gen-go-$(PROTOC_GEN_GO_VERSION) PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION) STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION) SOURCES = $(shell find . -type f -name "*.go" -print) -GOPLS_VERSION ?= v0.15.1 +GOFUMPT_VERSION ?= v0.7.0 +GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt +GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION) + +GOPLS_VERSION ?= v0.17.1 GOPLS_DIR ?= $(abspath $(BIN))/gopls GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION) GOPLS_TEMP_FILE := $(shell mktemp) @@ -56,7 +53,7 @@ LOCODE_DB_PATH=$(abspath ./.cache/locode_db) LOCODE_DB_VERSION=v0.4.0 .PHONY: help all images dep clean fmts fumpt imports test lint docker/lint - prepare-release debpackage pre-commit unpre-commit + prepare-release pre-commit unpre-commit # To build a specific binary, use its name prefix with bin/ as a target # For example `make bin/frostfs-node` will build only storage node binary @@ -103,17 +100,15 @@ export-metrics: dep # Regenerate proto files: protoc: - @if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOC_GEN_GO_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \ + @if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \ make protoc-install; \ fi @for f in `find . -type f -name '*.proto' -not -path './bin/*'`; do \ echo "⇒ Processing $$f "; \ $(PROTOC_DIR)/bin/protoc \ --proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \ - --plugin=protoc-gen-go=$(PROTOC_GEN_GO_DIR)/protoc-gen-go \ --plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \ --go-frostfs_out=. --go-frostfs_opt=paths=source_relative \ - --go_out=. --go_opt=paths=source_relative \ --go-grpc_opt=require_unimplemented_servers=false \ --go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \ done @@ -121,15 +116,13 @@ protoc: # Install protoc protoc-install: @rm -rf $(PROTOBUF_DIR) - @mkdir $(PROTOBUF_DIR) + @mkdir -p $(PROTOBUF_DIR) @echo "⇒ Installing protoc... " @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip' @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip - @echo "⇒ Installing protoc-gen-go..." - @GOBIN=$(PROTOC_GEN_GO_DIR) go install -v google.golang.org/protobuf/...@$(PROTOC_GEN_GO_VERSION) @echo "⇒ Installing protogen FrostFS plugin..." - @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION) + @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/protogen@$(PROTOGEN_FROSTFS_VERSION) # Build FrostFS component's docker image image-%: @@ -147,6 +140,15 @@ images: image-storage image-ir image-cli image-adm # Build dirty local Docker images dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm +# Push FrostFS components' docker image to the registry +push-image-%: + @echo "⇒ Publish FrostFS $* docker image " + @docker push $(HUB_IMAGE)-$*:$(HUB_TAG) + +# Push all Docker images to the registry +.PHONY: push-images +push-images: push-image-storage push-image-ir push-image-cli push-image-adm + # Run `make %` in Golang container docker/%: docker run --rm -t \ @@ -165,10 +167,19 @@ imports: @echo "⇒ Processing goimports check" @goimports -w cmd/ pkg/ misc/ +# Install gofumpt +fumpt-install: + @rm -rf $(GOFUMPT_DIR) + @mkdir -p $(GOFUMPT_DIR) + @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) + # Run gofumpt fumpt: + @if [ !
-d "$(GOFUMPT_VERSION_DIR)" ]; then \ + make fumpt-install; \ + fi @echo "⇒ Processing gofumpt check" - @gofumpt -l -w cmd/ pkg/ misc/ + $(GOFUMPT_VERSION_DIR)/gofumpt -l -w cmd/ pkg/ misc/ # Run Unit Test with go test test: GOFLAGS ?= "-count=1" @@ -176,21 +187,44 @@ test: @echo "⇒ Running go test" @GOFLAGS="$(GOFLAGS)" go test ./... +# Install Gerrit commit-msg hook +review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks +review-install: + @git config remote.review.url \ + || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node + @mkdir -p $(GIT_HOOK_DIR)/ + @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg + @chmod +x $(GIT_HOOK_DIR)/commit-msg + @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg + @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg + +# Create a PR in Gerrit +review: BRANCH ?= master +review: + @git push review HEAD:refs/for/$(BRANCH) \ + --push-option r=e.stratonikov@yadro.com \ + --push-option r=d.stepanov@yadro.com \ + --push-option r=an.nikiforov@yadro.com \ + --push-option r=a.arifullin@yadro.com \ + --push-option r=ekaterina.lebedeva@yadro.com \ + --push-option r=a.savchuk@yadro.com \ + --push-option r=a.chuprov@yadro.com + # Run pre-commit pre-commit-run: @pre-commit run -a --hook-stage manual # Install linters -lint-install: +lint-install: $(BIN) @rm -rf $(OUTPUT_LINT_DIR) - @mkdir $(OUTPUT_LINT_DIR) + @mkdir -p $(OUTPUT_LINT_DIR) @mkdir -p $(TMP_DIR) @rm -rf $(TMP_DIR)/linters @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: @@ -202,7 +236,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir $(STATICCHECK_DIR) + @mkdir -p $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -215,7 +249,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir $(GOPLS_DIR) + @mkdir -p $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls @@ -256,19 +290,6 @@ clean: rm -rf $(BIN) rm -rf $(RELEASE) -# Package for Debian -debpackage: - dch -b --package frostfs-node \ - --controlmaint \ - --newversion $(PKG_VERSION) \ - --distribution $(OS_RELEASE) \ - "Please see CHANGELOG.md for code changes for $(VERSION)" - dpkg-buildpackage --no-sign -b - -# Cleanup deb package build directories -debclean: - dh clean - # Download locode database locode-download: mkdir -p $(TMP_DIR) @@ -282,10 +303,12 @@ env-up: all echo "Frostfs contracts not found"; exit 1; \ fi ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH} - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0 - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0 - ${BIN}/frostfs-adm --config 
./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0 - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0 + ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \ + --storage-wallet ./dev/storage/wallet01.json \ + --storage-wallet ./dev/storage/wallet02.json \ + --storage-wallet ./dev/storage/wallet03.json \ + --storage-wallet ./dev/storage/wallet04.json + @if [ ! -f "$(LOCODE_DB_PATH)" ]; then \ make locode-download; \ fi @@ -294,7 +317,6 @@ env-up: all # Shutdown dev environment env-down: - docker compose -f dev/docker-compose.yml down - docker volume rm -f frostfs-node_neo-go + docker compose -f dev/docker-compose.yml down -v rm -rf ./$(TMP_DIR)/state rm -rf ./$(TMP_DIR)/storage diff --git a/README.md b/README.md index 413010372..0109ed0e5 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@

-<img src=".github/logo.svg" alt="FrostFS">
+<img src=".forgejo/logo.svg" alt="FrostFS">

@@ -7,9 +7,8 @@

--- -[![Report](https://goreportcard.com/badge/github.com/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-node) -![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/TrueCloudLab/frostfs-node?sort=semver) -![License](https://img.shields.io/github/license/TrueCloudLab/frostfs-node.svg?style=popout) +[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-node) +![Release (latest)](https://git.frostfs.info/TrueCloudLab/frostfs-node/badges/release.svg) # Overview @@ -33,8 +32,8 @@ manipulate large amounts of data without paying a prohibitive price. FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has protocol gateways for popular protocols such as [AWS -S3](https://github.com/TrueCloudLab/frostfs-s3-gw), -[HTTP](https://github.com/TrueCloudLab/frostfs-http-gw), +S3](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw), +[HTTP](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw), [FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and [sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing developers to integrate applications without rewriting their code. @@ -45,11 +44,11 @@ Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More platforms will be officially supported after release `1.0`. The latest version of frostfs-node works with frostfs-contract -[v0.16.0](https://github.com/TrueCloudLab/frostfs-contract/releases/tag/v0.16.0). +[v0.19.2](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/tag/v0.19.2). # Building -To make all binaries you need Go 1.21+ and `make`: +To make all binaries you need Go 1.22+ and `make`: ``` make all ``` @@ -71,7 +70,7 @@ make docker/bin/frostfs- # build a specific binary ## Docker images -To make docker images suitable for use in [frostfs-dev-env](https://github.com/TrueCloudLab/frostfs-dev-env/) use: +To make docker images suitable for use in [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env/) use: ``` make images ``` @@ -99,7 +98,7 @@ See `frostfs-contract`'s README.md for build instructions. 4. To create container and put object into it run (container and object IDs will be different): ``` -./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --basic-acl public-read-write --await +./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await Enter password > <- press ENTER, there is no password for wallet CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju @@ -125,7 +124,7 @@ the feature/topic you are going to implement. # Credits -FrostFS is maintained by [True Cloud Lab](https://github.com/TrueCloudLab/) with the help and +FrostFS is maintained by [True Cloud Lab](https://git.frostfs.info/TrueCloudLab/) with the help and contributions from community members. Please see [CREDITS](CREDITS.md) for details. diff --git a/VERSION b/VERSION index 01efe7f3a..9052dab96 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v0.42.0 +v0.44.0 diff --git a/cmd/frostfs-adm/docs/deploy.md b/cmd/frostfs-adm/docs/deploy.md index 87d2e47c1..b4b1ed8e4 100644 --- a/cmd/frostfs-adm/docs/deploy.md +++ b/cmd/frostfs-adm/docs/deploy.md @@ -9,8 +9,8 @@ related configuration details.
To follow this guide you need: - latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment), -- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases) utility (v0.25.1 at the moment), -- latest released version of compiled [frostfs-contract](https://github.com/TrueCloudLab/frostfs-contract/releases) (v0.11.0 at the moment). +- latest released version of [frostfs-adm](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases) utility (v0.42.9 at the moment), +- latest released version of compiled [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases) (v0.19.2 at the moment). ## Step 1: Prepare network configuration diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index 81395edb0..f194e97f5 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -16,10 +16,18 @@ const ( EndpointFlagDesc = "N3 RPC node endpoint" EndpointFlagShort = "r" + WalletPath = "wallet" + WalletPathShorthand = "w" + WalletPathUsage = "Path to the wallet" + AlphabetWalletsFlag = "alphabet-wallets" AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" + AdminWalletPath = "wallet-admin" + AdminWalletUsage = "Path to the admin wallet" + LocalDumpFlag = "local-dump" + ProtoConfigPath = "protocol" ContractsInitFlag = "contracts" ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)" ContractsURLFlag = "contracts-url" diff --git a/cmd/frostfs-adm/internal/modules/config/config.go b/cmd/frostfs-adm/internal/modules/config/config.go index a98245d01..69153f0d7 100644 --- a/cmd/frostfs-adm/internal/modules/config/config.go +++ b/cmd/frostfs-adm/internal/modules/config/config.go @@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) { tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets") var i innerring.GlagoliticLetter - for i = 0; i < innerring.GlagoliticLetter(credSize); i++ { + for i = range innerring.GlagoliticLetter(credSize) { tmpl.Glagolitics = append(tmpl.Glagolitics, i.String()) } diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go new file mode 100644 index 000000000..d67b70d2a --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/root.go @@ -0,0 +1,15 @@ +package maintenance + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie" + "github.com/spf13/cobra" +) + +var RootCmd = &cobra.Command{ + Use: "maintenance", + Short: "Section for maintenance commands", +} + +func init() { + RootCmd.AddCommand(zombie.Cmd) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go new file mode 100644 index 000000000..1b66889aa --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go @@ -0,0 +1,70 @@ +package zombie + +import ( + "crypto/ecdsa" + "fmt" + "os" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/cli/flags" + "github.com/nspcc-dev/neo-go/cli/input" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + 
"github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey { + keyDesc := viper.GetString(walletFlag) + if keyDesc == "" { + return &nodeconfig.Key(appCfg).PrivateKey + } + data, err := os.ReadFile(keyDesc) + commonCmd.ExitOnErr(cmd, "open wallet file: %w", err) + + priv, err := keys.NewPrivateKeyFromBytes(data) + if err != nil { + w, err := wallet.NewWalletFromFile(keyDesc) + commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err) + return fromWallet(cmd, w, viper.GetString(addressFlag)) + } + return &priv.PrivateKey +} + +func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey { + var ( + addr util.Uint160 + err error + ) + + if addrStr == "" { + addr = w.GetChangeAddress() + } else { + addr, err = flags.ParseAddress(addrStr) + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err) + } + + acc := w.GetAccount(addr) + if acc == nil { + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr)) + } + + pass, err := getPassword() + commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err) + + commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams())) + + return &acc.PrivateKey().PrivateKey +} + +func getPassword() (string, error) { + // this check allows empty passwords + if viper.IsSet("password") { + return viper.GetString("password"), nil + } + + return input.ReadPassword("Enter password > ") +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go new file mode 100644 index 000000000..f73f33db9 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go @@ -0,0 +1,31 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func list(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + var containerID *cid.ID + if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" { + containerID = &cid.ID{} + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + } + + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error { + if containerID != nil && a.Container() != *containerID { + return nil + } + cmd.Println(a.EncodeToString()) + return nil + })) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go new file mode 100644 index 000000000..cd3a64499 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go @@ -0,0 +1,46 @@ +package zombie + +import ( + "errors" + + 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "github.com/spf13/cobra" +) + +func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client { + addresses := morphconfig.RPCEndpoint(appCfg) + if len(addresses) == 0 { + commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found")) + } + key := nodeconfig.Key(appCfg) + cli, err := client.New(cmd.Context(), + key, + client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), + client.WithEndpoints(addresses...), + client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), + ) + commonCmd.ExitOnErr(cmd, "create morph client: %w", err) + return cli +} + +func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client { + hs, err := morph.NNSContractAddress(client.NNSContainerContractName) + commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err) + cc, err := cntClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph container client: %w", err) + return cc +} + +func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client { + hs, err := morph.NNSContractAddress(client.NNSNetmapContractName) + commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err) + cli, err := netmapClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err) + return cli +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go new file mode 100644 index 000000000..27f83aec7 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go @@ -0,0 +1,154 @@ +package zombie + +import ( + "context" + "fmt" + "math" + "os" + "path/filepath" + "strings" + "sync" + + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +type quarantine struct { + // mtx protects current field. 
+ mtx sync.Mutex + current int + trees []*fstree.FSTree +} + +func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine { + var paths []string + for _, sh := range engineInfo.Shards { + var storagePaths []string + for _, st := range sh.BlobStorInfo.SubStorages { + storagePaths = append(storagePaths, st.Path) + } + if len(storagePaths) == 0 { + continue + } + paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine")) + } + q, err := newQuarantine(paths) + commonCmd.ExitOnErr(cmd, "create quarantine: %w", err) + return q +} + +func commonPath(paths []string) string { + if len(paths) == 0 { + return "" + } + if len(paths) == 1 { + return paths[0] + } + minLen := math.MaxInt + for _, p := range paths { + if len(p) < minLen { + minLen = len(p) + } + } + + var sb strings.Builder + for i := range minLen { + for _, path := range paths[1:] { + if paths[0][i] != path[i] { + return sb.String() + } + } + sb.WriteByte(paths[0][i]) + } + return sb.String() +} + +func newQuarantine(paths []string) (*quarantine, error) { + var q quarantine + for i := range paths { + f := fstree.New( + fstree.WithDepth(1), + fstree.WithDirNameLen(1), + fstree.WithPath(paths[i]), + fstree.WithPerm(os.ModePerm), + ) + if err := f.Open(mode.ComponentReadWrite); err != nil { + return nil, fmt.Errorf("open fstree %s: %w", paths[i], err) + } + if err := f.Init(); err != nil { + return nil, fmt.Errorf("init fstree %s: %w", paths[i], err) + } + q.trees = append(q.trees, f) + } + return &q, nil +} + +func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { + for i := range q.trees { + res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a}) + if err != nil { + continue + } + return res.Object, nil + } + return nil, &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Delete(ctx context.Context, a oid.Address) error { + for i := range q.trees { + _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a}) + if err != nil { + continue + } + return nil + } + return &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error { + data, err := obj.Marshal() + if err != nil { + return err + } + + var prm common.PutPrm + prm.Address = objectcore.AddressOf(obj) + prm.Object = obj + prm.RawData = data + + q.mtx.Lock() + current := q.current + q.current = (q.current + 1) % len(q.trees) + q.mtx.Unlock() + + _, err = q.trees[current].Put(ctx, prm) + return err +} + +func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error { + var prm common.IteratePrm + prm.Handler = func(elem common.IterationElement) error { + return f(elem.Address) + } + for i := range q.trees { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + _, err := q.trees[i].Iterate(ctx, prm) + if err != nil { + return err + } + } + return nil +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go new file mode 100644 index 000000000..0b8f2f172 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go @@ -0,0 +1,55 @@ +package zombie + +import ( + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func remove(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + removeObject(cmd, q, addr) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { + if addr.Container() != containerID { + return nil + } + removeObject(cmd, q, addr) + return nil + })) + } +} + +func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) { + err := q.Delete(cmd.Context(), addr) + if errors.Is(err, new(apistatus.ObjectNotFound)) { + return + } + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go new file mode 100644 index 000000000..f179c7c2d --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go @@ -0,0 +1,69 @@ +package zombie + +import ( + "crypto/sha256" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func restore(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + restoreObject(cmd, storageEngine, q, addr, cnrCli) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr 
oid.Address) error { + if addr.Container() != containerID { + return nil + } + restoreObject(cmd, storageEngine, q, addr, cnrCli) + return nil + })) + } +} + +func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) { + obj, err := q.Get(cmd.Context(), addr) + commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err) + rawCID := make([]byte, sha256.Size) + + cid := addr.Container() + cid.Encode(rawCID) + cnr, err := cnrCli.Get(cmd.Context(), rawCID) + commonCmd.ExitOnErr(cmd, "get container: %w", err) + + putPrm := engine.PutPrm{ + Object: obj, + IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value), + } + commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm)) + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr)) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go new file mode 100644 index 000000000..c8fd9e5e5 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go @@ -0,0 +1,123 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const ( + flagBatchSize = "batch-size" + flagBatchSizeUsage = "Objects iteration batch size" + cidFlag = "cid" + cidFlagUsage = "Container ID" + oidFlag = "oid" + oidFlagUsage = "Object ID" + walletFlag = "wallet" + walletFlagShorthand = "w" + walletFlagUsage = "Path to the wallet or binary key" + addressFlag = "address" + addressFlagUsage = "Address of wallet account" + moveFlag = "move" + moveFlagUsage = "Move objects from storage engine to quarantine" +) + +var ( + Cmd = &cobra.Command{ + Use: "zombie", + Short: "Zombie objects related commands", + } + scanCmd = &cobra.Command{ + Use: "scan", + Short: "Scan storage engine for zombie objects and move them to quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag)) + _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag)) + _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize)) + _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag)) + }, + Run: scan, + } + listCmd = &cobra.Command{ + Use: "list", + Short: "List zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + }, + Run: list, + } + restoreCmd = &cobra.Command{ + Use: "restore", + Short: "Restore zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) + }, + Run: restore, + } + removeCmd = &cobra.Command{ + Use: "remove", + Short: 
"Remove zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) + }, + Run: remove, + } +) + +func init() { + initScanCmd() + initListCmd() + initRestoreCmd() + initRemoveCmd() +} + +func initScanCmd() { + Cmd.AddCommand(scanCmd) + + scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage) + scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage) + scanCmd.Flags().String(addressFlag, "", addressFlagUsage) + scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage) +} + +func initListCmd() { + Cmd.AddCommand(listCmd) + + listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + listCmd.Flags().String(cidFlag, "", cidFlagUsage) +} + +func initRestoreCmd() { + Cmd.AddCommand(restoreCmd) + + restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + restoreCmd.Flags().String(cidFlag, "", cidFlagUsage) + restoreCmd.Flags().String(oidFlag, "", oidFlagUsage) +} + +func initRemoveCmd() { + Cmd.AddCommand(removeCmd) + + removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + removeCmd.Flags().String(cidFlag, "", cidFlagUsage) + removeCmd.Flags().String(oidFlag, "", oidFlagUsage) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go new file mode 100644 index 000000000..268ec4911 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go @@ -0,0 +1,281 @@ +package zombie + +import ( + "context" + "crypto/ecdsa" + "crypto/sha256" + "errors" + "fmt" + "sync" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" + clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + 
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +func scan(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + batchSize, _ := cmd.Flags().GetUint32(flagBatchSize) + if batchSize == 0 { + commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value")) + } + move, _ := cmd.Flags().GetBool(moveFlag) + + storageEngine := newEngine(cmd, appCfg) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + nmCli := createNetmapClient(cmd, morphClient) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + pk := getPrivateKey(cmd, appCfg) + + epoch, err := nmCli.Epoch(cmd.Context()) + commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err) + + nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch) + commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err) + + cmd.Printf("Epoch: %d\n", nm.Epoch()) + cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes())) + + ps := &processStatus{ + statusCount: make(map[status]uint64), + } + + stopCh := make(chan struct{}) + start := time.Now() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tick := time.NewTicker(time.Second) + defer tick.Stop() + for { + select { + case <-cmd.Context().Done(): + return + case <-stopCh: + return + case <-tick.C: + fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start)) + } + } + }() + go func() { + defer wg.Done() + err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move) + close(stopCh) + }() + wg.Wait() + commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err) + + cmd.Println() + cmd.Println("Status description:") + cmd.Println("undefined -- nothing is clear") + cmd.Println("found -- object is found in cluster") + cmd.Println("quarantine -- object is not found in cluster") + cmd.Println() + for status, count := range ps.statusCount { + cmd.Printf("Status: %s, Count: %d\n", status, count) + } +} + +type status string + +const ( + statusUndefined status = "undefined" + statusFound status = "found" + statusQuarantine status = "quarantine" +) + +func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) { + rawCID := make([]byte, sha256.Size) + cid := obj.Address.Container() + cid.Encode(rawCID) + + cnr, err := cnrCli.Get(ctx, rawCID) + if err != nil { + var errContainerNotFound *apistatus.ContainerNotFound + if errors.As(err, &errContainerNotFound) { + // Policer will deal with this object. + return statusFound, nil + } + return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err) + } + nm, err := nmCli.NetMap(ctx) + if err != nil { + return statusUndefined, fmt.Errorf("read netmap from morph: %w", err) + } + + nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID) + if err != nil { + // Not enough nodes, check all netmap nodes. 
+ nodes = append([][]netmap.NodeInfo{}, nm.Nodes()) + } + + objID := obj.Address.Object() + cnrID := obj.Address.Container() + local := true + raw := false + if obj.ECInfo != nil { + objID = obj.ECInfo.ParentID + local = false + raw = true + } + prm := clientSDK.PrmObjectHead{ + ObjectID: &objID, + ContainerID: &cnrID, + Local: local, + Raw: raw, + } + + var ni clientCore.NodeInfo + for i := range nodes { + for j := range nodes[i] { + if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil { + return statusUndefined, fmt.Errorf("parse node info: %w", err) + } + c, err := cc.Get(ni) + if err != nil { + continue + } + res, err := c.ObjectHead(ctx, prm) + if err != nil { + var errECInfo *objectSDK.ECInfoError + if raw && errors.As(err, &errECInfo) { + return statusFound, nil + } + continue + } + if err := apistatus.ErrFromStatus(res.Status()); err != nil { + continue + } + return statusFound, nil + } + } + + if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 { + return statusFound, nil + } + return statusQuarantine, nil +} + +func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus, + appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool, +) error { + cc := cache.NewSDKClientCache(cache.ClientCacheOpts{ + DialTimeout: apiclientconfig.DialTimeout(appCfg), + StreamTimeout: apiclientconfig.StreamTimeout(appCfg), + ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), + Key: pk, + AllowExternal: apiclientconfig.AllowExternal(appCfg), + }) + ctx := cmd.Context() + + var cursor *engine.Cursor + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var prm engine.ListWithCursorPrm + prm.WithCursor(cursor) + prm.WithCount(batchSize) + + res, err := storageEngine.ListWithCursor(ctx, prm) + if err != nil { + if errors.Is(err, engine.ErrEndOfListing) { + return nil + } + return fmt.Errorf("list with cursor: %w", err) + } + + cursor = res.Cursor() + addrList := res.AddressList() + eg, egCtx := errgroup.WithContext(ctx) + eg.SetLimit(int(batchSize)) + + for i := range addrList { + addr := addrList[i] + eg.Go(func() error { + result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr) + if err != nil { + return fmt.Errorf("check object %s status: %w", addr.Address, err) + } + ps.add(result) + + if !move && result == statusQuarantine { + cmd.Println(addr) + return nil + } + + if result == statusQuarantine { + return moveToQuarantine(egCtx, storageEngine, q, addr.Address) + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return fmt.Errorf("process objects batch: %w", err) + } + } +} + +func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error { + var getPrm engine.GetPrm + getPrm.WithAddress(addr) + res, err := storageEngine.Get(ctx, getPrm) + if err != nil { + return fmt.Errorf("get object %s from storage engine: %w", addr, err) + } + + if err := q.Put(ctx, res.Object()); err != nil { + return fmt.Errorf("put object %s to quarantine: %w", addr, err) + } + + var delPrm engine.DeletePrm + delPrm.WithForceRemoval() + delPrm.WithAddress(addr) + + if err = storageEngine.Delete(ctx, delPrm); err != nil { + return fmt.Errorf("delete object %s from storage engine: %w", addr, err) + } + return nil +} + +type processStatus struct { + guard sync.RWMutex + statusCount 
map[status]uint64 + count uint64 +} + +func (s *processStatus) add(st status) { + s.guard.Lock() + defer s.guard.Unlock() + s.statusCount[st]++ + s.count++ +} + +func (s *processStatus) total() uint64 { + s.guard.RLock() + defer s.guard.RUnlock() + return s.count +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go new file mode 100644 index 000000000..5be34d502 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go @@ -0,0 +1,201 @@ +package zombie + +import ( + "context" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" + shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" + blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" + fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "github.com/panjf2000/ants/v2" + "github.com/spf13/cobra" + "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine { + ngOpts := storageEngineOptions(c) + shardOpts := shardOptions(cmd, c) + e := engine.New(ngOpts...) + for _, opts := range shardOpts { + _, err := e.AddShard(cmd.Context(), opts...) 
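+		// Abort on the first shard that cannot be attached: a partially
+		// assembled engine would scan an incomplete object set.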
+ commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + } + commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context())) + commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context())) + return e +} + +func storageEngineOptions(c *config.Config) []engine.Option { + return []engine.Option{ + engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)), + engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)), + } +} + +func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option { + var result [][]shard.Option + err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error { + result = append(result, getShardOpts(cmd, c, sh)) + return nil + }) + commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + return result +} + +func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option { + wc, wcEnabled := getWriteCacheOpts(sh) + return []shard.Option{ + shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + shard.WithRefillMetabase(sh.RefillMetabase()), + shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()), + shard.WithMode(sh.Mode()), + shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...), + shard.WithMetaBaseOptions(getMetabaseOpts(sh)...), + shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...), + shard.WithWriteCache(wcEnabled), + shard.WithWriteCacheOptions(wc), + shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()), + shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()), + shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()), + shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()), + shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { + pool, err := ants.NewPool(sz) + commonCmd.ExitOnErr(cmd, "init GC pool: %w", err) + return pool + }), + shard.WithLimiter(qos.NewNoopLimiter()), + } +} + +func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) { + if wc := sh.WriteCache(); wc != nil && wc.Enabled() { + var result []writecache.Option + result = append(result, + writecache.WithPath(wc.Path()), + writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()), + writecache.WithMaxObjectSize(wc.MaxObjectSize()), + writecache.WithFlushWorkersCount(wc.WorkerCount()), + writecache.WithMaxCacheSize(wc.SizeLimit()), + writecache.WithMaxCacheCount(wc.CountLimit()), + writecache.WithNoSync(wc.NoSync()), + writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + writecache.WithQoSLimiter(qos.NewNoopLimiter()), + ) + return result, true + } + return nil, false +} + +func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option { + var piloramaOpts []pilorama.Option + if config.BoolSafe(c.Sub("tree"), "enabled") { + pr := sh.Pilorama() + piloramaOpts = append(piloramaOpts, + pilorama.WithPath(pr.Path()), + pilorama.WithPerm(pr.Perm()), + pilorama.WithNoSync(pr.NoSync()), + pilorama.WithMaxBatchSize(pr.MaxBatchSize()), + pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()), + ) + } + return piloramaOpts +} + +func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { + return []meta.Option{ + meta.WithPath(sh.Metabase().Path()), + meta.WithPermissions(sh.Metabase().BoltDB().Perm()), + meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()), + meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()), + meta.WithBoltDBOptions(&bbolt.Options{ + Timeout: 100 * 
time.Millisecond, + }), + meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + meta.WithEpochState(&epochState{}), + } +} + +func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { + result := []blobstor.Option{ + blobstor.WithCompression(sh.Compression()), + blobstor.WithStorages(getSubStorages(ctx, sh)), + blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + return result +} + +func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage { + var ss []blobstor.SubStorage + for _, storage := range sh.BlobStor().Storages() { + switch storage.Type() { + case blobovniczatree.Type: + sub := blobovniczaconfig.From((*config.Config)(storage)) + blobTreeOpts := []blobovniczatree.Option{ + blobovniczatree.WithRootPath(storage.Path()), + blobovniczatree.WithPermissions(storage.Perm()), + blobovniczatree.WithBlobovniczaSize(sub.Size()), + blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()), + blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()), + blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()), + blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()), + blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()), + blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()), + blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()), + blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...), + Policy: func(_ *objectSDK.Object, data []byte) bool { + return uint64(len(data)) < sh.SmallSizeLimit() + }, + }) + case fstree.Type: + sub := fstreeconfig.From((*config.Config)(storage)) + fstreeOpts := []fstree.Option{ + fstree.WithPath(storage.Path()), + fstree.WithPerm(storage.Perm()), + fstree.WithDepth(sub.Depth()), + fstree.WithNoSync(sub.NoSync()), + fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: fstree.New(fstreeOpts...), + Policy: func(_ *objectSDK.Object, _ []byte) bool { + return true + }, + }) + default: + // should never happen, that has already + // been handled: when the config was read + } + } + return ss +} + +type epochState struct{} + +func (epochState) CurrentEpoch() uint64 { + return 0 +} diff --git a/cmd/frostfs-adm/internal/modules/metabase/root.go b/cmd/frostfs-adm/internal/modules/metabase/root.go new file mode 100644 index 000000000..5b21ed273 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/metabase/root.go @@ -0,0 +1,15 @@ +package metabase + +import "github.com/spf13/cobra" + +// RootCmd is a root command of config section. 
+var RootCmd = &cobra.Command{ + Use: "metabase", + Short: "Section for metabase commands", +} + +func init() { + RootCmd.AddCommand(UpgradeCmd) + + initUpgradeCommand() +} diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go new file mode 100644 index 000000000..c0c290c5e --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go @@ -0,0 +1,156 @@ +package metabase + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" + shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" + morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +const ( + noCompactFlag = "no-compact" +) + +var ( + errNoPathsFound = errors.New("no metabase paths found") + errNoMorphEndpointsFound = errors.New("no morph endpoints found") + errUpgradeFailed = errors.New("upgrade failed") +) + +var UpgradeCmd = &cobra.Command{ + Use: "upgrade", + Short: "Upgrade metabase to latest version", + RunE: upgrade, +} + +func upgrade(cmd *cobra.Command, _ []string) error { + configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag) + if err != nil { + return err + } + configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag) + if err != nil { + return err + } + appCfg := config.New(configFile, configDir, config.EnvPrefix) + paths, err := getMetabasePaths(appCfg) + if err != nil { + return err + } + if len(paths) == 0 { + return errNoPathsFound + } + cmd.Println("found", len(paths), "metabases:") + for i, path := range paths { + cmd.Println(i+1, ":", path) + } + mc, err := createMorphClient(cmd.Context(), appCfg) + if err != nil { + return err + } + defer mc.Close() + civ, err := createContainerInfoProvider(mc) + if err != nil { + return err + } + noCompact, _ := cmd.Flags().GetBool(noCompactFlag) + result := make(map[string]bool) + var resultGuard sync.Mutex + eg, ctx := errgroup.WithContext(cmd.Context()) + for _, path := range paths { + eg.Go(func() error { + var success bool + cmd.Println("upgrading metabase", path, "...") + if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) { + cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...) 
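+			// Upgrades run in parallel (one goroutine per metabase), so each
+			// log line is prefixed with a timestamp and the metabase path to
+			// keep interleaved output attributable.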
+ }); err != nil { + cmd.Println("error: failed to upgrade metabase", path, ":", err) + } else { + success = true + cmd.Println("metabase", path, "upgraded successfully") + } + resultGuard.Lock() + result[path] = success + resultGuard.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + return err + } + allSuccess := true + for mb, ok := range result { + if ok { + cmd.Println(mb, ": success") + } else { + cmd.Println(mb, ": failed") + allSuccess = false + } + } + if allSuccess { + return nil + } + return errUpgradeFailed +} + +func getMetabasePaths(appCfg *config.Config) ([]string, error) { + var paths []string + if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error { + paths = append(paths, sc.Metabase().Path()) + return nil + }); err != nil { + return nil, fmt.Errorf("get metabase paths: %w", err) + } + return paths, nil +} + +func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) { + addresses := morphconfig.RPCEndpoint(appCfg) + if len(addresses) == 0 { + return nil, errNoMorphEndpointsFound + } + key := nodeconfig.Key(appCfg) + cli, err := client.New(ctx, + key, + client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), + client.WithEndpoints(addresses...), + client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), + ) + if err != nil { + return nil, fmt.Errorf("create morph client:%w", err) + } + return cli, nil +} + +func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) { + sh, err := cli.NNSContractAddress(client.NNSContainerContractName) + if err != nil { + return nil, fmt.Errorf("resolve container contract hash: %w", err) + } + cc, err := morphcontainer.NewFromMorph(cli, sh, 0) + if err != nil { + return nil, fmt.Errorf("create morph container client: %w", err) + } + return container.NewInfoProvider(func() (container.Source, error) { + return morphcontainer.AsContainerSource(cc), nil + }), nil +} + +func initUpgradeCommand() { + flags := UpgradeCmd.Flags() + flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file") +} diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go index 077e03737..1960faab4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go @@ -5,35 +5,19 @@ import ( "encoding/json" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/spf13/cobra" "github.com/spf13/viper" ) const ( - namespaceTarget = "namespace" - containerTarget = "container" - userTarget = "user" - groupTarget = "group" - jsonFlag = "json" - jsonFlagDesc = "Output rule chains in JSON format" - chainIDFlag = "chain-id" - chainIDDesc = "Rule chain ID" - ruleFlag = "rule" - ruleFlagDesc = "Rule chain in text format" - pathFlag = "path" - pathFlagDesc = "path to encoded chain in JSON or binary format" - targetNameFlag = "target-name" - targetNameDesc = "Resource name in APE resource name format" - targetTypeFlag = "target-type" - targetTypeDesc = "Resource type(container/namespace)" - addrAdminFlag = "addr" - 
addrAdminDesc = "The address of the admins wallet" - chainNameFlag = "chain-name" - chainNameFlagDesc = "Chain name(ingress|s3)" + jsonFlag = "json" + jsonFlagDesc = "Output rule chains in JSON format" + addrAdminFlag = "addr" + addrAdminDesc = "The address of the admins wallet" ) var ( @@ -101,17 +85,17 @@ func initAddRuleChainCmd() { addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - addRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc) - _ = addRuleChainCmd.MarkFlagRequired(targetTypeFlag) - addRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc) - _ = addRuleChainCmd.MarkFlagRequired(targetNameFlag) + addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetTypeFlagDesc) + _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag) - addRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc) - _ = addRuleChainCmd.MarkFlagRequired(chainIDFlag) - addRuleChainCmd.Flags().StringArray(ruleFlag, []string{}, ruleFlagDesc) - addRuleChainCmd.Flags().String(pathFlag, "", pathFlagDesc) - addRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc) - addRuleChainCmd.MarkFlagsMutuallyExclusive(ruleFlag, pathFlag) + addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + _ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag) + addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc) + addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc) + addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc) + addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag) } func initRemoveRuleChainCmd() { @@ -120,26 +104,25 @@ func initRemoveRuleChainCmd() { removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - removeRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc) - _ = removeRuleChainCmd.MarkFlagRequired(targetTypeFlag) - removeRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc) - _ = removeRuleChainCmd.MarkFlagRequired(targetNameFlag) - removeRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc) - removeRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc) + removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag) + removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc) removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target") - removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, chainIDFlag) + removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag) } func initListRuleChainsCmd() { Cmd.AddCommand(listRuleChainsCmd) 
listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - listRuleChainsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc) - _ = listRuleChainsCmd.MarkFlagRequired(targetTypeFlag) - listRuleChainsCmd.Flags().String(targetNameFlag, "", targetNameDesc) - _ = listRuleChainsCmd.MarkFlagRequired(targetNameFlag) + listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc) + _ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc) - listRuleChainsCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc) + listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc) } func initSetAdminCmd() { @@ -161,15 +144,15 @@ func initListTargetsCmd() { Cmd.AddCommand(listTargetsCmd) listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - listTargetsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc) - _ = listTargetsCmd.MarkFlagRequired(targetTypeFlag) + listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc) + _ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) } func addRuleChain(cmd *cobra.Command, _ []string) { - chain := parseChain(cmd) + chain := apeCmd.ParseChain(cmd) target := parseTarget(cmd) pci, ac := newPolicyContractInterface(cmd) - h, vub, err := pci.AddMorphRuleChain(parseChainName(cmd), target, chain) + h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain) cmd.Println("Waiting for transaction to persist...") _, err = ac.Wait(h, vub, err) commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err) @@ -181,14 +164,14 @@ func removeRuleChain(cmd *cobra.Command, _ []string) { pci, ac := newPolicyContractInterface(cmd) removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag) if removeAll { - h, vub, err := pci.RemoveMorphRuleChainsByTarget(parseChainName(cmd), target) + h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target) cmd.Println("Waiting for transaction to persist...") _, err = ac.Wait(h, vub, err) commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err) cmd.Println("All chains for target removed successfully") } else { - chainID := parseChainID(cmd) - h, vub, err := pci.RemoveMorphRuleChain(parseChainName(cmd), target, chainID) + chainID := apeCmd.ParseChainID(cmd) + h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID) cmd.Println("Waiting for transaction to persist...") _, err = ac.Wait(h, vub, err) commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err) @@ -199,7 +182,7 @@ func removeRuleChain(cmd *cobra.Command, _ []string) { func listRuleChains(cmd *cobra.Command, _ []string) { target := parseTarget(cmd) pci, _ := newPolicyContractReaderInterface(cmd) - chains, err := pci.ListMorphRuleChains(parseChainName(cmd), target) + chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target) commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err) if len(chains) == 0 { return @@ -210,14 +193,14 @@ func listRuleChains(cmd *cobra.Command, _ []string) { prettyJSONFormat(cmd, chains) } else { for _, c := range chains { - parseutil.PrintHumanReadableAPEChain(cmd, c) + apeCmd.PrintHumanReadableAPEChain(cmd, c) } } } func setAdmin(cmd *cobra.Command, _ []string) { s, _ := 
cmd.Flags().GetString(addrAdminFlag) - addr, err := util.Uint160DecodeStringLE(s) + addr, err := address.StringToUint160(s) commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err) pci, ac := newPolicyContractInterface(cmd) h, vub, err := pci.SetAdmin(addr) @@ -231,12 +214,11 @@ func getAdmin(cmd *cobra.Command, _ []string) { pci, _ := newPolicyContractReaderInterface(cmd) addr, err := pci.GetAdmin() commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err) - cmd.Println(addr.StringLE()) + cmd.Println(address.Uint160ToString(addr)) } func listTargets(cmd *cobra.Command, _ []string) { - typ, err := parseTargetType(cmd) - commonCmd.ExitOnErr(cmd, "parse target type error: %w", err) + typ := apeCmd.ParseTargetType(cmd) pci, inv := newPolicyContractReaderInterface(cmd) sid, it, err := pci.ListTargetsIterator(typ) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 5e17f4014..3c332c3f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -2,13 +2,14 @@ package ape import ( "errors" - "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -18,84 +19,29 @@ import ( "github.com/spf13/viper" ) -const ( - ingress = "ingress" - s3 = "s3" -) - -var mChainName = map[string]apechain.Name{ - ingress: apechain.Ingress, - s3: apechain.S3, -} - -var ( - errUnknownTargetType = errors.New("unknown target type") - errChainIDCannotBeEmpty = errors.New("chain id cannot be empty") - errRuleIsNotParsed = errors.New("rule is not passed") - errUnsupportedChainName = errors.New("unsupported chain name") -) +var errUnknownTargetType = errors.New("unknown target type") func parseTarget(cmd *cobra.Command) policyengine.Target { - name, _ := cmd.Flags().GetString(targetNameFlag) - typ, err := parseTargetType(cmd) - commonCmd.ExitOnErr(cmd, "read target type error: %w", err) - - return policyengine.Target{ - Name: name, - Type: typ, - } -} - -func parseTargetType(cmd *cobra.Command) (policyengine.TargetType, error) { - typ, _ := cmd.Flags().GetString(targetTypeFlag) + typ := apeCmd.ParseTargetType(cmd) + name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag) switch typ { - case namespaceTarget: - return policyengine.Namespace, nil - case containerTarget: - return policyengine.Container, nil - case userTarget: - return policyengine.User, nil - case groupTarget: - return policyengine.Group, nil + case policyengine.Namespace: + if name == "root" { + name = "" + } + return policyengine.NamespaceTarget(name) + case policyengine.Container: + var cnr cid.ID + commonCmd.ExitOnErr(cmd, "can't decode container 
ID: %w", cnr.DecodeString(name)) + return policyengine.ContainerTarget(name) + case policyengine.User: + return policyengine.UserTarget(name) + case policyengine.Group: + return policyengine.GroupTarget(name) + default: + commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) } - return -1, errUnknownTargetType -} - -func parseChainID(cmd *cobra.Command) apechain.ID { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - if chainID == "" { - commonCmd.ExitOnErr(cmd, "read chain id error: %w", - errChainIDCannotBeEmpty) - } - return apechain.ID(chainID) -} - -func parseChain(cmd *cobra.Command) *apechain.Chain { - chain := new(apechain.Chain) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed) - } - - chain.ID = parseChainID(cmd) - - cmd.Println("Parsed chain:") - parseutil.PrintHumanReadableAPEChain(cmd, chain) - - return chain -} - -func parseChainName(cmd *cobra.Command) apechain.Name { - chainName, _ := cmd.Flags().GetString(chainNameFlag) - apeChainName, ok := mChainName[strings.ToLower(chainName)] - if !ok { - commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName) - } - return apeChainName + panic("unreachable") } // invokerAdapter adapats invoker.Invoker to ContractStorageInvoker interface. @@ -109,16 +55,15 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { } func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) inv := invoker.New(c, nil) - var ch util.Uint160 r := management.NewReader(inv) nnsCs, err := helper.GetContractByID(r, 1) commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) - ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract)) + ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract)) commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err) invokerAdapter := &invokerAdapter{ @@ -130,10 +75,11 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag } func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c) + walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) + ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName}) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) var ch util.Uint160 diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go index 5519705d4..23dba14f4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go +++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -51,7 +52,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error { nmHash util.Uint160 ) - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return err } @@ -161,9 +162,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) alphaRes, err := c.InvokeScript(w.Bytes(), nil) if err != nil { @@ -226,9 +225,7 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan for i := range accounts { emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) res, err := c.Run(w.Bytes()) if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) { diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index 3a7f84acb..c17fb62ff 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -26,7 +26,7 @@ import ( const forceConfigSet = "force" func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig, netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig: nbuf := make([]byte, 8) - copy(nbuf[:], v) + copy(nbuf, v) n := binary.LittleEndian.Uint64(nbuf) - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n)) case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: if len(v) == 0 || len(v) > 1 { return helper.InvalidConfigValueErr(k) } - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1)) default: - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v)))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v))) } } diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go index a3b4f129a..be4041a86 100644 --- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go +++ b/cmd/frostfs-adm/internal/modules/morph/constants/const.go @@ -4,7 +4,6 @@ import "time" const ( ConsensusAccountName = "consensus" - ProtoConfigPath = "protocol" // MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size // of the invocation script. 
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index a66438975..79685f111 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/io" @@ -76,7 +77,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error { return fmt.Errorf("invalid filename: %w", err) } - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -139,13 +140,12 @@ func dumpContainers(cmd *cobra.Command, _ []string) error { func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) { bw.Reset() emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id) - emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id) res, err := inv.Run(bw.Bytes()) if err != nil { return nil, fmt.Errorf("can't get container info: %w", err) } - if len(res.Stack) != 2 { - return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse) + if len(res.Stack) != 1 { + return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse) } cnt := new(Container) @@ -154,19 +154,11 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err) } - ea := new(EACL) - err = ea.FromStackItem(res.Stack[1]) - if err != nil { - return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err) - } - if len(ea.Value) != 0 { - cnt.EACL = ea - } return cnt, nil } func listContainers(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -244,9 +236,7 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd putContainer(bw, ch, cnt) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err @@ -258,10 +248,6 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) { emit.AppCall(bw.BinWriter, ch, "put", callflag.All, cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token) - if ea := cnt.EACL; ea != nil { - emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All, - ea.Value, ea.Signature, ea.PublicKey, ea.Token) - } } func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) { @@ -322,15 +308,6 @@ type Container struct { Signature []byte `json:"signature"` PublicKey []byte `json:"public_key"` Token []byte `json:"token"` - EACL *EACL `json:"eacl"` -} - -// EACL represents extended ACL struct in contract storage. 
-type EACL struct { - Value []byte `json:"value"` - Signature []byte `json:"signature"` - PublicKey []byte `json:"public_key"` - Token []byte `json:"token"` } // ToStackItem implements stackitem.Convertible. @@ -377,50 +354,6 @@ func (c *Container) FromStackItem(item stackitem.Item) error { return nil } -// ToStackItem implements stackitem.Convertible. -func (c *EACL) ToStackItem() (stackitem.Item, error) { - return stackitem.NewStruct([]stackitem.Item{ - stackitem.NewByteArray(c.Value), - stackitem.NewByteArray(c.Signature), - stackitem.NewByteArray(c.PublicKey), - stackitem.NewByteArray(c.Token), - }), nil -} - -// FromStackItem implements stackitem.Convertible. -func (c *EACL) FromStackItem(item stackitem.Item) error { - arr, ok := item.Value().([]stackitem.Item) - if !ok || len(arr) != 4 { - return errors.New("invalid stack item type") - } - - value, err := arr[0].TryBytes() - if err != nil { - return errors.New("invalid eACL value") - } - - sig, err := arr[1].TryBytes() - if err != nil { - return errors.New("invalid eACL signature") - } - - pub, err := arr[2].TryBytes() - if err != nil { - return errors.New("invalid eACL public key") - } - - tok, err := arr[3].TryBytes() - if err != nil { - return errors.New("invalid eACL token") - } - - c.Value = value - c.Signature = sig - c.PublicKey = pub - c.Token = tok - return nil -} - // getCIDFilterFunc returns filtering function for container IDs. // Raw byte slices are used because it works with structures returned // from contract. diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go index 5adb480da..543b5fcb3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/cli/cmdargs" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/encoding/address" @@ -120,9 +121,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error { } } - if writer.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } + assert.NoError(writer.Err, "can't create deployment script") if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil { return err @@ -173,9 +172,8 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) } - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } else if bw.Len() != start { + assert.NoError(bw.Err, "can't create deployment script") + if bw.Len() != start { writer.WriteBytes(bw.Bytes()) emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index 5a0d29550..fde58fd2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -11,6 +11,7 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -36,7 +37,7 @@ type contractDumpInfo struct { } func dumpContractHashes(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -68,7 +69,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error { if irSize != 0 { bw.Reset() - for i := 0; i < irSize; i++ { + for i := range irSize { emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly, helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) @@ -79,7 +80,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error { return fmt.Errorf("can't fetch info from NNS: %w", err) } - for i := 0; i < irSize; i++ { + for i := range irSize { info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)} if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil { info.hash = h @@ -219,8 +220,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) { if info.version == "" { info.version = "unknown" } - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n", - info.name, info.version, info.hash.StringLE()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n", + info.name, info.version, info.hash.StringLE())) } _ = tw.Flush() @@ -236,21 +237,17 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu } else { sub.Reset() emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag) - if sub.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) - } + assert.NoError(sub.Err, "can't create version script") script := sub.Bytes() emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) - bw.BinWriter.WriteBytes(script) + bw.WriteBytes(script) emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1}) emit.Opcodes(bw.BinWriter, opcode.PUSH0) } } emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) - } + assert.NoError(bw.Err, "can't create version script") res, err := c.InvokeScript(bw.Bytes(), nil) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go new file mode 100644 index 000000000..4046e85e3 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go @@ -0,0 +1,83 @@ +package frostfsid + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + frostfsidAddSubjectKeyCmd = &cobra.Command{ + Use: "add-subject-key", + Short: "Add a public key to the subject in frostfsid contract", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = 
viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidAddSubjectKey, + } + frostfsidRemoveSubjectKeyCmd = &cobra.Command{ + Use: "remove-subject-key", + Short: "Remove a public key from the subject in frostfsid contract", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidRemoveSubjectKey, + } +) + +func initFrostfsIDAddSubjectKeyCmd() { + Cmd.AddCommand(frostfsidAddSubjectKeyCmd) + + ff := frostfsidAddSubjectKeyCmd.Flags() + ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + + ff.String(subjectAddressFlag, "", "Subject address") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag) + + ff.String(subjectKeyFlag, "", "Public key to add") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag) +} + +func initFrostfsIDRemoveSubjectKeyCmd() { + Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd) + + ff := frostfsidRemoveSubjectKeyCmd.Flags() + ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + + ff.String(subjectAddressFlag, "", "Subject address") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag) + + ff.String(subjectKeyFlag, "", "Public key to remove") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag) +} + +func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) { + addr := getFrostfsIDSubjectAddress(cmd) + pub := getFrostfsIDSubjectKey(cmd) + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub)) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "add subject key: %w", err) +} + +func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) { + addr := getFrostfsIDSubjectAddress(cmd) + pub := getFrostfsIDSubjectKey(cmd) + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub)) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "remove subject key: %w", err) +} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 091d6634a..7f777db98 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,6 +1,7 @@ package frostfsid import ( + "encoding/hex" "errors" "fmt" "math/big" @@ -34,11 +35,16 @@ const ( subjectNameFlag = "subject-name" subjectKeyFlag = "subject-key" subjectAddressFlag = "subject-address" - includeNamesFlag = "include-names" + extendedFlag = "extended" groupNameFlag = "group-name" groupIDFlag = "group-id" rootNamespacePlaceholder = "" + + keyFlag = "key" + keyDescFlag = "Key for storing a value in the subject's KV storage" + valueFlag = "value" + valueDescFlag = "Value to be stored in the subject's KV storage" ) var ( @@ -61,7 +67,6 @@ var ( Use: "list-namespaces", Short: "List all namespaces in frostfsid", PreRun: func(cmd *cobra.Command, _ []string) { - _ = 
viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListNamespaces, @@ -91,7 +96,6 @@ var ( Use: "list-subjects", Short: "List subjects in namespace", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListSubjects, @@ -121,7 +125,6 @@ var ( Use: "list-groups", Short: "List groups in namespace", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListGroups, @@ -151,11 +154,27 @@ var ( Use: "list-group-subjects", Short: "List subjects in group", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListGroupSubjects, } + + frostfsidSetKVCmd = &cobra.Command{ + Use: "set-kv", + Short: "Store a key-value pair in the subject's KV storage", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidSetKV, + } + frostfsidDeleteKVCmd = &cobra.Command{ + Use: "delete-kv", + Short: "Delete a value from the subject's KV storage", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidDeleteKV, + } ) func initFrostfsIDCreateNamespaceCmd() { @@ -169,7 +188,6 @@ func initFrostfsIDCreateNamespaceCmd() { func initFrostfsIDListNamespacesCmd() { Cmd.AddCommand(frostfsidListNamespacesCmd) frostfsidListNamespacesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidListNamespacesCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initFrostfsIDCreateSubjectCmd() { @@ -192,8 +210,7 @@ func initFrostfsIDListSubjectsCmd() { Cmd.AddCommand(frostfsidListSubjectsCmd) frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects") - frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") - frostfsidListSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") } func initFrostfsIDCreateGroupCmd() { @@ -217,7 +234,6 @@ func initFrostfsIDListGroupsCmd() { Cmd.AddCommand(frostfsidListGroupsCmd) frostfsidListGroupsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListGroupsCmd.Flags().String(namespaceFlag, "", "Namespace to list groups") - frostfsidListGroupsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func 
initFrostfsIDAddSubjectToGroupCmd() { @@ -241,8 +257,22 @@ func initFrostfsIDListGroupSubjectsCmd() { frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name") frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") - frostfsidListGroupSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") +} + +func initFrostfsIDSetKVCmd() { + Cmd.AddCommand(frostfsidSetKVCmd) + frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") + frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag) + frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag) +} + +func initFrostfsIDDeleteKVCmd() { + Cmd.AddCommand(frostfsidDeleteKVCmd) + frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") + frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag) } func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { @@ -262,7 +292,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) { reader := frostfsidrpclient.NewReader(inv, hash) sessionID, it, err := reader.ListNamespaces() commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) namespaces, err := frostfsidclient.ParseNamespaces(items) @@ -307,34 +337,32 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) { } func frostfsidListSubjects(cmd *cobra.Command, _ []string) { - includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) + extended, _ := cmd.Flags().GetBool(extendedFlag) ns := getFrostfsIDNamespace(cmd) inv, _, hash := initInvoker(cmd) reader := frostfsidrpclient.NewReader(inv, hash) sessionID, it, err := reader.ListNamespaceSubjects(ns) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID)) + subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID)) commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err) sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) for _, addr := range subAddresses { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(addr)) continue } - sessionID, it, err := reader.ListSubjects() + items, err := reader.GetSubject(addr) commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) - commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) - subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name) + printSubjectInfo(cmd, addr, subj) + cmd.Println() } } @@ -374,7 +402,7 @@ 
func frostfsidListGroups(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroups(ns) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) groups, err := frostfsidclient.ParseGroups(items) commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err) @@ -412,10 +440,49 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err) } +func frostfsidSetKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + value, _ := cmd.Flags().GetString(valueFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "set KV: %w", err) +} + +func frostfsidDeleteKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "delete KV: %w", err) +} + func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) groupID := getFrostfsIDGroupID(cmd) - includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) + extended, _ := cmd.Flags().GetBool(extendedFlag) inv, cs, hash := initInvoker(cmd) _, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err) @@ -424,7 +491,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID)) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err) @@ -433,7 +500,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) }) for _, subjAddr := range subjects { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(subjAddr)) continue } @@ -442,7 +509,8 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name) + printSubjectInfo(cmd, subjAddr, subj) + cmd.Println() } } @@ -497,32 +565,28 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) { } f.bw.Reset() - if len(f.wCtx.SentTxs) == 0 { - return nil, errors.New("no transactions to wait") - } - f.wCtx.Command.Println("Waiting for transactions to persist...") return 
f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil) } -func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) { +func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) { var shouldStop bool res := make([]stackitem.Item, 0) for !shouldStop { - items, err := inv.TraverseIterator(sessionID, iter, batchSize) + items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize) if err != nil { return nil, err } res = append(res, items...) - shouldStop = len(items) < batchSize + shouldStop = len(items) < iteratorBatchSize } return res, nil } func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) inv := invoker.New(c, nil) @@ -536,3 +600,30 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui return inv, cs, nmHash } + +func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) { + cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) + pk := "" + if subj.PrimaryKey != nil { + pk = hex.EncodeToString(subj.PrimaryKey.Bytes()) + } + cmd.Printf("Primary key: %s\n", pk) + cmd.Printf("Name: %s\n", subj.Name) + cmd.Printf("Namespace: %s\n", subj.Namespace) + if len(subj.AdditionalKeys) > 0 { + cmd.Printf("Additional keys:\n") + for _, key := range subj.AdditionalKeys { + k := "" + if key != nil { + k = hex.EncodeToString(key.Bytes()) + } + cmd.Printf("- %s\n", k) + } + } + if len(subj.KV) > 0 { + cmd.Printf("KV:\n") + for k, v := range subj.KV { + cmd.Printf("- %s: %s\n", k, v) + } + } +} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go index cce859d2f..1d0bc8441 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go @@ -1,59 +1,12 @@ package frostfsid import ( - "encoding/hex" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/spf13/viper" "github.com/stretchr/testify/require" ) -func TestFrostfsIDConfig(t *testing.T) { - pks := make([]*keys.PrivateKey, 4) - for i := range pks { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - pks[i] = pk - } - - fmts := []string{ - pks[0].GetScriptHash().StringLE(), - address.Uint160ToString(pks[1].GetScriptHash()), - hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()), - hex.EncodeToString(pks[3].PublicKey().Bytes()), - } - - for i := range fmts { - v := viper.New() - v.Set("frostfsid.admin", fmts[i]) - - actual, found, err := helper.GetFrostfsIDAdmin(v) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, pks[i].GetScriptHash(), actual) - } - - t.Run("bad key", func(t *testing.T) { - v := viper.New() - v.Set("frostfsid.admin", "abc") - - _, found, err := helper.GetFrostfsIDAdmin(v) - require.Error(t, err) - require.True(t, found) - }) - t.Run("missing key", func(t *testing.T) { - v := viper.New() - - _, found, err := helper.GetFrostfsIDAdmin(v) - require.NoError(t, err) - require.False(t, found) 
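
The readIterator signature change above moves the page size out of every call site: the helper now always pages by the package-level iteratorBatchSize constant. For readers new to neo-go's session-backed iterators, a minimal standalone sketch of the same traversal pattern (the batch constant and function name here are illustrative, not from this patch):

package frostfsid

import (
	"github.com/google/uuid"
	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// drainIterator pages through a server-side iterator session until a short
// page signals exhaustion, mirroring readIterator above.
func drainIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
	const batch = 100 // stands in for iteratorBatchSize
	res := make([]stackitem.Item, 0)
	for {
		items, err := inv.TraverseIterator(sessionID, iter, batch)
		if err != nil {
			return nil, err
		}
		res = append(res, items...)
		if len(items) < batch {
			return res, nil
		}
	}
}
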
- }) -} - func TestNamespaceRegexp(t *testing.T) { for _, tc := range []struct { name string diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go index 850474794..8aad5c5c1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go @@ -12,4 +12,8 @@ func init() { initFrostfsIDAddSubjectToGroupCmd() initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDListGroupSubjectsCmd() + initFrostfsIDSetKVCmd() + initFrostfsIDDeleteKVCmd() + initFrostfsIDAddSubjectKeyCmd() + initFrostfsIDRemoveSubjectKeyCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go index c7de599e5..78f8617f1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go @@ -12,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" "github.com/nspcc-dev/neo-go/pkg/smartcontract" @@ -73,7 +72,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er return nil, fmt.Errorf("can't fetch password: %w", err) } - i := i errG.Go(func() error { p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json") f, err := os.OpenFile(p, os.O_CREATE, 0o644) @@ -107,7 +105,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er // Create consensus account with 2*N/3+1 multi-signature. 
bftCount := smartcontract.GetDefaultHonestNodeCount(size) for i := range wallets { - i := i ps := pubs.Copy() errG.Go(func() error { if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil { @@ -143,60 +140,29 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key } func generateStorageCreds(cmd *cobra.Command, _ []string) error { - return refillGas(cmd, storageGasConfigFlag, true) -} - -func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) { - // storage wallet path is not part of the config - storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) - // wallet address is not part of the config - walletAddress, _ := cmd.Flags().GetString(walletAddressFlag) - - var gasReceiver util.Uint160 - - if len(walletAddress) != 0 { - gasReceiver, err = address.StringToUint160(walletAddress) - if err != nil { - return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err) - } - } else { - if storageWalletPath == "" { - return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) - } - - var w *wallet.Wallet - - if createWallet { - w, err = wallet.NewWallet(storageWalletPath) - } else { - w, err = wallet.NewWalletFromFile(storageWalletPath) - } - - if err != nil { - return fmt.Errorf("can't create wallet: %w", err) - } - - if createWallet { - var password string - - label, _ := cmd.Flags().GetString(storageWalletLabelFlag) - password, err := config.GetStoragePassword(viper.GetViper(), label) - if err != nil { - return fmt.Errorf("can't fetch password: %w", err) - } - - if label == "" { - label = constants.SingleAccountName - } - - if err := w.CreateAccount(label, password); err != nil { - return fmt.Errorf("can't create account: %w", err) - } - } - - gasReceiver = w.Accounts[0].Contract.ScriptHash() + walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) + w, err := wallet.NewWallet(walletPath) + if err != nil { + return fmt.Errorf("create wallet: %w", err) } + label, _ := cmd.Flags().GetString(storageWalletLabelFlag) + password, err := config.GetStoragePassword(viper.GetViper(), label) + if err != nil { + return fmt.Errorf("can't fetch password: %w", err) + } + + if label == "" { + label = constants.SingleAccountName + } + + if err := w.CreateAccount(label, password); err != nil { + return fmt.Errorf("can't create account: %w", err) + } + return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash()) +} + +func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) { gasStr := viper.GetString(gasFlag) gasAmount, err := helper.ParseGASAmount(gasStr) @@ -210,9 +176,11 @@ func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error } bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, - wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) + for _, gasReceiver := range gasReceivers { + emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, + wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) + emit.Opcodes(bw.BinWriter, opcode.ASSERT) + } if bw.Err != nil { return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go index 1dd6420eb..15af5637b 100644 --- 
a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go @@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) { buf.Reset() v.Set(commonflags.AlphabetWalletsFlag, walletDir) require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10))) - for i := uint64(0); i < size; i++ { + for i := range uint64(size) { buf.WriteString(strconv.FormatUint(i, 10) + "\r") } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go index 3633d9a8e..73c986713 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/root.go @@ -1,7 +1,12 @@ package generate import ( + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -33,7 +38,27 @@ var ( _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) }, RunE: func(cmd *cobra.Command, _ []string) error { - return refillGas(cmd, commonflags.RefillGasAmountFlag, false) + storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag) + walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag) + + var gasReceivers []util.Uint160 + for _, walletAddress := range walletAddresses { + addr, err := address.StringToUint160(walletAddress) + if err != nil { + return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err) + } + + gasReceivers = append(gasReceivers, addr) + } + for _, storageWalletPath := range storageWalletPaths { + w, err := wallet.NewWalletFromFile(storageWalletPath) + if err != nil { + return fmt.Errorf("can't create wallet: %w", err) + } + + gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash()) + } + return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...) 
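
With refillGas now variadic, a single transaction can top up any number of receivers collected from the repeated wallet flags above. A condensed sketch of the script it emits, using the same neo-go emit helpers as the hunk above (the standalone function name is illustrative):

package generate

import (
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)

// buildGasTransfers emits one GAS "transfer" call per receiver, each followed
// by ASSERT so the whole transaction aborts if any single transfer returns false.
func buildGasTransfers(from util.Uint160, amount int64, receivers ...util.Uint160) ([]byte, error) {
	bw := io.NewBufBinWriter()
	for _, to := range receivers {
		emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, from, to, amount, nil)
		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
	}
	if bw.Err != nil {
		return nil, bw.Err
	}
	return bw.Bytes(), nil
}

The per-receiver ASSERT keeps the all-or-nothing semantics that the old single-receiver version had.
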
}, } GenerateAlphabetCmd = &cobra.Command{ @@ -50,10 +75,10 @@ var ( func initRefillGasCmd() { RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet") - RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet") + RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet") + RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet") RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer") - RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag) + RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag) } func initGenerateStorageCmd() { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go index 1ca246f9f..6499ace5f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go @@ -3,10 +3,6 @@ package helper import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -17,7 +13,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -29,44 +24,88 @@ type LocalActor struct { rpcInvoker invoker.RPCInvoke } +type AlphabetWallets struct { + Label string + Path string +} + +func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) { + w, err := GetAlphabetWallets(v, a.Path) + if err != nil { + return nil, err + } + + var accounts []*wallet.Account + for _, wall := range w { + acc, err := GetWalletAccount(wall, a.Label) + if err != nil { + return nil, err + } + accounts = append(accounts, acc) + } + return accounts, nil +} + +type RegularWallets struct{ Path string } + +func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) { + w, err := getRegularWallet(r.Path) + if err != nil { + return nil, err + } + + return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil +} + // NewLocalActor create LocalActor with accounts form provided wallets. // In case of empty wallets provided created actor with dummy account only for read operation. -func NewLocalActor(cmd *cobra.Command, c actor.RPCActor) (*LocalActor, error) { - walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) +// +// If wallets are provided, the contract client will use accounts with accName name from these wallets. +// To determine which account name should be used in a contract client, refer to how the contract +// verifies the transaction signature. 
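
A hypothetical call site for the reworked constructor that follows; the wallet paths are made up, and the account label mirrors how nnsWriter later in this patch wires it up:

func exampleLocalActor(c actor.RPCActor) (*helper.LocalActor, error) {
	// Accounts with the consensus label are taken from the alphabet wallets;
	// the regular wallet contributes its change-address account.
	alphabet := &helper.AlphabetWallets{
		Path:  "/etc/frostfs/alphabet-wallets", // hypothetical path
		Label: constants.ConsensusAccountName,
	}
	admin := &helper.RegularWallets{Path: "/home/ops/admin.json"} // hypothetical path
	return helper.NewLocalActor(c, alphabet, admin)
}
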
+func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) { var act *actor.Actor var accounts []*wallet.Account - if walletDir == "" { - account, err := wallet.NewAccount() - commonCmd.ExitOnErr(cmd, "unable to create dummy account: %w", err) - act, err = actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: account.Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: account, - }}) - if err != nil { - return nil, err - } - } else { - wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir) - commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) + var signers []actor.SignerAccount - for _, w := range wallets { - acc, err := GetWalletAccount(w, constants.CommitteeAccountName) - commonCmd.ExitOnErr(cmd, "can't find committee account: %w", err) - accounts = append(accounts, acc) - } - act, err = actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: accounts[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: accounts[0], - }}) + if alphabet != nil { + account, err := alphabet.GetAccount(viper.GetViper()) if err != nil { return nil, err } + + accounts = append(accounts, account...) + signers = append(signers, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: account[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: account[0], + }) + } + + for _, w := range regularWallets { + if w == nil { + continue + } + account, err := w.GetAccount() + if err != nil { + return nil, err + } + + accounts = append(accounts, account...) + signers = append(signers, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: account[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: account[0], + }) + } + + act, err := actor.New(c, signers) + if err != nil { + return nil, err } return &LocalActor{ neoActor: act, diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go index 2011301d1..64d1c6393 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go @@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker) } if method != constants.UpdateMethodName || err == nil && !found { - h, found, err = GetFrostfsIDAdmin(viper.GetViper()) + h, found, err = getFrostfsIDAdmin(viper.GetViper()) } if err != nil { return nil, err @@ -166,5 +166,6 @@ func DeployNNS(c *InitializeContext, method string) error { return fmt.Errorf("can't send deploy transaction: %w", err) } + c.Command.Println("NNS hash:", invokeHash.StringLE()) return c.AwaitTx() } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go index f29042b82..fce2dfb74 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go @@ -11,7 +11,7 @@ import ( const frostfsIDAdminConfigKey = "frostfsid.admin" -func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { +func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { admin := v.GetString(frostfsIDAdminConfigKey) if admin == "" { return util.Uint160{}, false, nil diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go 
b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go new file mode 100644 index 000000000..38991e962 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go @@ -0,0 +1,53 @@ +package helper + +import ( + "encoding/hex" + "testing" + + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" +) + +func TestFrostfsIDConfig(t *testing.T) { + pks := make([]*keys.PrivateKey, 4) + for i := range pks { + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + pks[i] = pk + } + + fmts := []string{ + pks[0].GetScriptHash().StringLE(), + address.Uint160ToString(pks[1].GetScriptHash()), + hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()), + hex.EncodeToString(pks[3].PublicKey().Bytes()), + } + + for i := range fmts { + v := viper.New() + v.Set("frostfsid.admin", fmts[i]) + + actual, found, err := getFrostfsIDAdmin(v) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, pks[i].GetScriptHash(), actual) + } + + t.Run("bad key", func(t *testing.T) { + v := viper.New() + v.Set("frostfsid.admin", "abc") + + _, found, err := getFrostfsIDAdmin(v) + require.Error(t, err) + require.True(t, found) + }) + t.Run("missing key", func(t *testing.T) { + v := viper.New() + + _, found, err := getFrostfsIDAdmin(v) + require.NoError(t, err) + require.False(t, found) + }) +} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go index 961ceba53..50b5c1ec7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go @@ -6,6 +6,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -13,9 +14,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" @@ -187,19 +186,9 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (* } func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) { - switch c.(type) { - case *rpcclient.Client: - inv := invoker.New(c, nil) - reader := nns2.NewReader(inv, nnsHash) - return reader.IsAvailable(name) - default: - b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil)) - if err != nil { - return false, fmt.Errorf("`isAvailable`: invalid response: %w", err) - } - - return b, nil - } + inv := invoker.New(c, nil) + reader := nns2.NewReader(inv, nnsHash) + return reader.IsAvailable(name) } func CheckNotaryEnabled(c Client) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index b5b6adf05..da5ffedae 100644 --- 
a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -13,6 +13,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -21,6 +22,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" @@ -28,7 +30,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/emit" "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -134,12 +135,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex return nil, err } - accounts, err := createWalletAccounts(wallets) + accounts, err := getSingleAccounts(wallets) if err != nil { return nil, err } - cliCtx, err := DefaultClientContext(c, committeeAcc) + cliCtx, err := defaultClientContext(c, committeeAcc) if err != nil { return nil, fmt.Errorf("client context: %w", err) } @@ -191,7 +192,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) } c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String()) } else { - c, err = GetN3Client(v) + c, err = NewRemoteClient(v) } if err != nil { return nil, fmt.Errorf("can't create N3 client: %w", err) @@ -211,7 +212,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) { return ctrPath, nil } -func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { +func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { accounts := make([]*wallet.Account, len(wallets)) for i, w := range wallets { acc, err := GetWalletAccount(w, constants.SingleAccountName) @@ -375,9 +376,7 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen } act, err = actor.New(c.Client, signers) } else { - if withConsensus { - panic("BUG: should never happen") - } + assert.False(withConsensus, "BUG: should never happen") act, err = c.CommitteeAct, nil } if err != nil { @@ -411,11 +410,9 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error { version, err := c.Client.GetVersion() - if err != nil { - // error appears only if client - // has not been initialized - panic(err) - } + // error appears only if client + // has not been initialized + assert.NoError(err) network := version.Protocol.Network // Use parameter context to avoid dealing with signature order. 
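
The assert helper used above comes from the new internal/assert package, whose source is not shown in this diff. Judging from the call sites (assert.True, assert.False, assert.NoError, each with optional detail strings), a minimal implementation would look roughly like this; treat it as a sketch, not the actual file:

package assert

import (
	"fmt"
	"strings"
)

// True panics with the joined details when cond does not hold.
func True(cond bool, details ...string) {
	if !cond {
		panic(strings.Join(details, " "))
	}
}

// False panics with the joined details when cond holds.
func False(cond bool, details ...string) {
	True(!cond, details...)
}

// NoError panics when err is non-nil, prefixing the optional details.
func NoError(err error, details ...string) {
	if err == nil {
		return
	}
	if len(details) == 0 {
		panic(err)
	}
	panic(fmt.Sprintf("%s: %v", strings.Join(details, " "), err))
}
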
@@ -447,12 +444,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin for i := range tx.Signers { if tx.Signers[i].Account == h { + assert.True(i <= len(tx.Scripts), "BUG: invalid signing order") if i < len(tx.Scripts) { tx.Scripts[i] = *w - } else if i == len(tx.Scripts) { + } + if i == len(tx.Scripts) { tx.Scripts = append(tx.Scripts, *w) - } else { - panic("BUG: invalid signing order") } return nil } @@ -510,9 +507,7 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) emit.Opcodes(bw.BinWriter, opcode.ASSERT) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) return bw.Bytes(), false, nil } @@ -524,12 +519,8 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U } func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) { - res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone) - if err != nil { - return false, err - } - - return res.State == vmstate.Halt.String(), nil + avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone)) + return !avail, err } func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 44d1b4ecf..46611c177 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -8,7 +8,9 @@ import ( "sort" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/config" "github.com/nspcc-dev/neo-go/pkg/core" @@ -47,7 +49,7 @@ type LocalClient struct { } func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) { - cfg, err := config.LoadFile(v.GetString(constants.ProtoConfigPath)) + cfg, err := config.LoadFile(v.GetString(commonflags.ProtoConfigPath)) if err != nil { return nil, err } @@ -57,17 +59,59 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet return nil, err } - m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount)) - accounts := make([]*wallet.Account, len(wallets)) - for i := range accounts { - accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName) - if err != nil { - return nil, err + go bc.Run() + + accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets) + if err != nil { + return nil, err + } + + if cmd.Name() != "init" { + if err := restoreDump(bc, dumpPath); err != nil { + return nil, fmt.Errorf("restore dump: %w", err) } } + return &LocalClient{ + bc: bc, + dumpPath: dumpPath, + accounts: accounts, + }, nil +} + +func restoreDump(bc *core.Blockchain, dumpPath string) error { + f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600) + if err != nil { + return fmt.Errorf("can't open local dump: %w", err) + } + defer f.Close() + + r := io.NewBinReaderFromIO(f) + + var skip uint32 + if bc.BlockHeight() != 0 { + skip = bc.BlockHeight() + 1 + } + + count := r.ReadU32LE() - skip + if err := chaindump.Restore(bc, r, skip, count, nil); err != nil { + return 
err + } + return nil +} + +func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) { + accounts := make([]*wallet.Account, len(wallets)) + for i := range accounts { + acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName) + if err != nil { + return nil, err + } + accounts[i] = acc + } + indexMap := make(map[string]int) - for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee { + for i, pub := range cfg.StandbyCommittee { indexMap[pub] = i } @@ -76,37 +120,12 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet pj := accounts[j].PrivateKey().PublicKey().Bytes() return indexMap[string(pi)] < indexMap[string(pj)] }) - sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool { + sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool { return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1 }) - go bc.Run() - - if cmd.Name() != "init" { - f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600) - if err != nil { - return nil, fmt.Errorf("can't open local dump: %w", err) - } - defer f.Close() - - r := io.NewBinReaderFromIO(f) - - var skip uint32 - if bc.BlockHeight() != 0 { - skip = bc.BlockHeight() + 1 - } - - count := r.ReadU32LE() - skip - if err := chaindump.Restore(bc, r, skip, count, nil); err != nil { - return nil, fmt.Errorf("can't restore local dump: %w", err) - } - } - - return &LocalClient{ - bc: bc, - dumpPath: dumpPath, - accounts: accounts[:m], - }, nil + m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount)) + return accounts[:m], nil } func (l *LocalClient) GetBlockCount() (uint32, error) { @@ -127,11 +146,6 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul return &a, nil } -func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) { - // not used by `morph init` command - panic("unexpected call") -} - // InvokeFunction is implemented via `InvokeScript`. func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) { var err error @@ -224,7 +238,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}} } else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok { paramz = make([]manifest.Parameter, nSigs) - for j := 0; j < nSigs; j++ { + for j := range nSigs { paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType} } } @@ -295,13 +309,7 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer) } func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) { - // We need to test that transaction was formed correctly to catch as many errors as we can. - bs := tx.Bytes() - _, err := transaction.NewTransactionFromBytes(bs) - if err != nil { - return tx.Hash(), fmt.Errorf("invalid transaction: %w", err) - } - + tx = tx.Copy() l.transactions = append(l.transactions, tx) return tx.Hash(), nil } @@ -309,9 +317,7 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint func (l *LocalClient) putTransactions() error { // 1. Prepare new block. 
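
SendRawTransaction above now queues a copy instead of round-tripping the transaction through serialization. The old round-trip only validated the encoding; a likely motivation for Copy() is to also shield the queued list, which is drained when the next block is assembled, from later mutation by the caller. A sketch of the hazard (helper names are hypothetical):

tx := buildTransferTx()                 // hypothetical constructor
hash, _ := local.SendRawTransaction(tx) // tx is now queued for the next block
tx.Scripts = nil                        // caller keeps mutating its reference...
// ...without the Copy() above, the queued transaction would silently change
// after hash was already handed out.
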
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - if err != nil { - panic(err) - } + assert.NoError(err) defer func() { l.transactions = l.transactions[:0] }() b := &block.Block{ @@ -352,9 +358,7 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s w := io.NewBufBinWriter() emit.Array(w.BinWriter, parameters...) emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All) - if w.Err != nil { - panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) - } + assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) return c.InvokeScript(w.Bytes(), signers) } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index e62a21b3f..3f3a66cb6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/neorpc/result" "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" @@ -25,15 +24,10 @@ import ( // Client represents N3 client interface capable of test-invoking scripts // and sending signed transactions to chain. type Client interface { - invoker.RPCInvoke + actor.RPCActor - GetBlockCount() (uint32, error) GetNativeContracts() ([]state.Contract, error) GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error) - GetVersion() (*result.Version, error) - SendRawTransaction(*transaction.Transaction) (util.Uint256, error) - GetCommittee() (keys.PublicKeys, error) - CalculateNetworkFee(tx *transaction.Transaction) (int64, error) } type HashVUBPair struct { @@ -48,7 +42,7 @@ type ClientContext struct { SentTxs []HashVUBPair } -func GetN3Client(v *viper.Viper) (Client, error) { +func NewRemoteClient(v *viper.Viper) (Client, error) { // number of opened connections // by neo-go client per one host const ( @@ -88,8 +82,14 @@ func GetN3Client(v *viper.Viper) (Client, error) { return c, nil } -func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { - commAct, err := NewActor(c, committeeAcc) +func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { + commAct, err := actor.New(c, []actor.SignerAccount{{ + Signer: transaction.Signer{ + Account: committeeAcc.Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: committeeAcc, + }}) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go index 7a778f8c3..20abaff0a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go @@ -3,6 +3,7 @@ package helper import ( "errors" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -72,13 +73,17 @@ func InvalidConfigValueErr(key string) error { return fmt.Errorf("invalid %s config value from netmap contract", key) } -func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160) error { +func EmitNewEpochCall(bw 
*io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error { + if countEpoch <= 0 { + return errors.New("number of epochs cannot be less than 1") + } + curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch")) if err != nil { return errors.New("can't fetch current epoch from the netmap contract") } - newEpoch := curr + 1 + newEpoch := curr + countEpoch wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch) // In NeoFS this is done via Notary contract. Here, however, we can form the @@ -114,11 +119,8 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error { return err } for k, v := range m { - for _, key := range NetmapConfigKeys { - if k == key { - md[k] = v - break - } + if slices.Contains(NetmapConfigKeys, k) { + md[k] = v } } return nil diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index 2d9281c24..be6b2c6dd 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -14,16 +14,36 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" + "github.com/nspcc-dev/neo-go/cli/input" "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/viper" ) +func getRegularWallet(walletPath string) (*wallet.Wallet, error) { + w, err := wallet.NewWalletFromFile(walletPath) + if err != nil { + return nil, err + } + + password, err := input.ReadPassword("Enter password for wallet:") + if err != nil { + return nil, fmt.Errorf("can't fetch password: %w", err) + } + + for i := range w.Accounts { + if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { + err = fmt.Errorf("can't unlock wallet: %w", err) + break + } + } + + return w, err +} + func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { wallets, err := openAlphabetWallets(v, walletDir) if err != nil { @@ -44,7 +64,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er var wallets []*wallet.Wallet var letter string - for i := 0; i < constants.MaxAlphabetNodes; i++ { + for i := range constants.MaxAlphabetNodes { letter = innerring.GlagoliticLetter(i).String() p := filepath.Join(walletDir, letter+".json") var w *wallet.Wallet @@ -53,7 +73,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er if errors.Is(err, os.ErrNotExist) { err = nil } else { - err = fmt.Errorf("can't open wallet: %w", err) + err = fmt.Errorf("can't open alphabet wallet: %w", err) } break } @@ -87,16 +107,6 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er return wallets, nil } -func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) { - return actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: committeeAcc.Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: committeeAcc, - }}) -} - func ReadContract(ctrPath, ctrName string) (*ContractState, error) 
{ rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef")) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go index e127ca545..176356378 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -111,9 +112,7 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - if w.Err != nil { - panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err)) - } + assert.NoError(w.Err, "can't wrap register script") } func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 4c6607f9a..7b7597d91 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -1,21 +1,18 @@ package initialize import ( - "errors" "fmt" "math/big" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/pkg/core/native" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/util" @@ -30,7 +27,8 @@ const ( ) func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - regPrice, err := getCandidateRegisterPrice(c) + reader := neo.NewReader(c.ReadOnlyInvoker) + regPrice, err := reader.GetRegisterPrice() if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) } @@ -42,9 +40,7 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error { emit.Opcodes(w.BinWriter, opcode.ASSERT) } emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) - if w.Err != nil { - panic(fmt.Sprintf("BUG: %v", w.Err)) - } + assert.NoError(w.Err) signers := []actor.SignerAccount{{ Signer: c.GetSigner(false, c.CommitteeAcc), @@ -116,7 +112,7 @@ func registerCandidates(c *helper.InitializeContext) error { func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { neoHash := neo.Hash 
- ok, err := transferNEOFinished(c, neoHash) + ok, err := transferNEOFinished(c) if ok || err != nil { return err } @@ -139,33 +135,8 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { return c.AwaitTx() } -func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) { - r := nep17.NewReader(c.ReadOnlyInvoker, neoHash) +func transferNEOFinished(c *helper.InitializeContext) (bool, error) { + r := neo.NewReader(c.ReadOnlyInvoker) bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err } - -var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response") - -func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) { - switch c.Client.(type) { - case *rpcclient.Client: - inv := invoker.New(c.Client, nil) - reader := neo.NewReader(inv) - return reader.GetRegisterPrice() - default: - neoHash := neo.Hash - res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil) - if err != nil { - return 0, err - } - if len(res.Stack) == 0 { - return 0, errGetPriceInvalid - } - bi, err := res.Stack[0].TryInteger() - if err != nil || !bi.IsInt64() { - return 0, errGetPriceInvalid - } - return bi.Int64(), nil - } -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go index a6815ee13..05bc83a8b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go @@ -1,6 +1,8 @@ package initialize import ( + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/io" @@ -29,10 +31,14 @@ func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error { callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs) if err := c.SendCommitteeTx(w.Bytes(), false); err != nil { - return err + return fmt.Errorf("send committee transaction: %w", err) } - return c.AwaitTx() + err := c.AwaitTx() + if err != nil { + err = fmt.Errorf("await committee transaction: %w", err) + } + return err } func setRolesFinished(c *helper.InitializeContext) (bool, error) { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go index 6c52aa2ab..9bc51c055 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go @@ -62,7 +62,7 @@ func testInitialize(t *testing.T, committeeSize int) { v := viper.GetViper() require.NoError(t, generateTestData(testdataDir, committeeSize)) - v.Set(constants.ProtoConfigPath, filepath.Join(testdataDir, protoFileName)) + v.Set(commonflags.ProtoConfigPath, filepath.Join(testdataDir, protoFileName)) // Set to the path or remove the next statement to download from the network. 
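
The type switch in getCandidateRegisterPrice could be dropped because helper.Client now embeds actor.RPCActor (see the n3client.go hunk earlier in this patch), so the in-process LocalClient can back a plain invoker exactly like the remote RPC client. Under that assumption the lookup reduces to a one-liner, using neo-go's rpcclient/neo and rpcclient/invoker packages:

func candidateRegisterPrice(c helper.Client) (int64, error) {
	// Works uniformly for the remote RPC client and the in-process LocalClient.
	return neo.NewReader(invoker.New(c, nil)).GetRegisterPrice()
}
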
require.NoError(t, Cmd.Flags().Set(commonflags.ContractsInitFlag, contractsPath)) @@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error { } var pubs []string - for i := 0; i < size; i++ { + for i := range size { p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json") w, err := wallet.NewWalletFromFile(p) if err != nil { @@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error { } func setTestCredentials(v *viper.Viper, size int) { - for i := 0; i < size; i++ { + for i := range size { v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10)) } v.Set("credentials.contract", constants.TestContractPassword) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index b7102fa13..bb684b3a9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -3,6 +3,7 @@ package initialize import ( "fmt" "math/big" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" @@ -21,17 +22,16 @@ import ( ) const ( - gasInitialTotalSupply = 30000000 * native.GASFactor // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. initialAlphabetGASAmount = 10_000 * native.GASFactor // initialProxyGASAmount represents the amount of GAS given to a proxy contract. initialProxyGASAmount = 50_000 * native.GASFactor - // alphabetGasRatio is a coefficient that defines the threshold below which - // the balance of the alphabet node is considered not replenished. The value - // of this coefficient is determined empirically. - alphabetGasRatio = 5 ) +func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { + return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 +} + func transferFunds(c *helper.InitializeContext) error { ok, err := transferFundsFinished(c) if ok || err != nil { @@ -41,6 +41,11 @@ func transferFunds(c *helper.InitializeContext) error { return err } + version, err := c.Client.GetVersion() + if err != nil { + return err + } + var transfers []transferTarget for _, acc := range c.Accounts { to := acc.Contract.ScriptHash() @@ -58,7 +63,7 @@ func transferFunds(c *helper.InitializeContext) error { transferTarget{ Token: gas.Hash, Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2, + Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), }, transferTarget{ Token: neo.Hash, @@ -79,12 +84,26 @@ func transferFunds(c *helper.InitializeContext) error { return c.AwaitTx() } +// transferFundsFinished checks balances of accounts we transfer GAS to. +// The stage is considered finished if the balance is greater than the half of what we need to transfer. 
func transferFundsFinished(c *helper.InitializeContext) (bool, error) { - acc := c.Accounts[0] - r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) - res, err := r.BalanceOf(acc.Contract.ScriptHash()) - return res.Cmp(big.NewInt(alphabetGasRatio*native.GASFactor)) == 1, err + res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash()) + if err != nil { + return false, err + } + + version, err := c.Client.GetVersion() + if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { + return false, err + } + + res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) + if err != nil { + return false, err + } + + return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err } func transferGASToProxy(c *helper.InitializeContext) error { @@ -144,5 +163,17 @@ func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients if err != nil { return nil, fmt.Errorf("can't create actor: %w", err) } - return act.MakeRun(w.Bytes()) + tx, err := act.MakeRun(w.Bytes()) + if err != nil { + sum := make(map[util.Uint160]int64) + for _, recipient := range recipients { + sum[recipient.Token] += recipient.Amount + } + detail := make([]string, 0, len(sum)) + for _, value := range sum { + detail = append(detail, fmt.Sprintf("amount=%v", value)) + } + err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err) + } + return tx, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go index b7885c512..50f14e728 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go @@ -2,7 +2,6 @@ package initialize import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -32,7 +31,7 @@ var Cmd = &cobra.Command{ _ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag)) _ = viper.BindPFlag(commonflags.ContainerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag)) _ = viper.BindPFlag(commonflags.WithdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag)) - _ = viper.BindPFlag(constants.ProtoConfigPath, cmd.Flags().Lookup(constants.ProtoConfigPath)) + _ = viper.BindPFlag(commonflags.ProtoConfigPath, cmd.Flags().Lookup(commonflags.ProtoConfigPath)) }, RunE: initializeSideChainCmd, } @@ -48,7 +47,7 @@ func initInitCmd() { // Defaults are taken from neo-preodolenie. 
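
To make the committee-share arithmetic above concrete, assume Neo N3's default InitialGasDistribution of 52,000,000 GAS (an assumption for illustration; the code reads the live value from the node's version reply) and seven alphabet wallets, with amounts in whole GAS:

const (
	initialGasDistribution = 52_000_000 // whole GAS; the real code queries the node
	alphabetNodes          = 7
	perAlphabetNode        = 10_000 // initialAlphabetGASAmount / native.GASFactor
)

// (52_000_000 - 70_000) / 2 = 25_965_000 GAS for the committee account.
var committeeShare = (initialGasDistribution - alphabetNodes*perAlphabetNode) / 2

transferFundsFinished then reports the stage done once the committee account holds more than that share and the consensus account has already paid out part of the initial distribution.
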
Cmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee") Cmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee") - Cmd.Flags().String(constants.ProtoConfigPath, "", "Path to the consensus node configuration") + Cmd.Flags().String(commonflags.ProtoConfigPath, "", "Path to the consensus node configuration") Cmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") Cmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag) } diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go index df9a03fd1..94223dbd0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go @@ -12,6 +12,8 @@ import ( "github.com/spf13/viper" ) +const deltaFlag = "delta" + func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error { wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) if err != nil { @@ -30,7 +32,8 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error { } bw := io.NewBufBinWriter() - if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil { + delta, _ := cmd.Flags().GetInt64(deltaFlag) + if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go index d8471bb9a..a689e0ec1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go @@ -13,7 +13,7 @@ import ( ) func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) inv := invoker.New(c, nil) diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go index 31fda860e..291482e0f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go @@ -12,7 +12,6 @@ var ( Short: "List netmap candidates nodes", PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) }, Run: listNetmapCandidatesNodes, } @@ -35,6 +34,7 @@ func initForceNewEpochCmd() { ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") + ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch") } func init() { diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go new file mode 100644 index 000000000..14f6eb390 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -0,0 +1,93 @@ +package nns + +import ( + "math/big" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + commonCmd 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/pkg/wallet" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func initRegisterCmd() { + Cmd.AddCommand(registerCmd) + registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email") + registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter") + registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") + registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") + registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") + registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) + + _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) +} + +func registerDomain(cmd *cobra.Command, _ []string) { + c, actor := nnsWriter(cmd) + + name, _ := cmd.Flags().GetString(nnsNameFlag) + email, _ := cmd.Flags().GetString(nnsEmailFlag) + refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag) + retry, _ := cmd.Flags().GetInt64(nnsRetryFlag) + expire, _ := cmd.Flags().GetInt64(nnsExpireFlag) + ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag) + + h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh), + big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl)) + commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err) + + cmd.Println("Waiting for transaction to persist...") + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "register domain error: %w", err) + cmd.Println("Domain registered successfully") +} + +func initDeleteCmd() { + Cmd.AddCommand(deleteCmd) + deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) + + _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) +} + +func deleteDomain(cmd *cobra.Command, _ []string) { + c, actor := nnsWriter(cmd) + + name, _ := cmd.Flags().GetString(nnsNameFlag) + h, vub, err := c.DeleteDomain(name) + + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) + cmd.Println("Domain deleted successfully") +} + +func initSetAdminCmd() { + Cmd.AddCommand(setAdminCmd) + setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) + setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage) + _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath) + + _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag) +} + +func setAdmin(cmd *cobra.Command, _ 
[]string) { + c, actor := nnsWriter(cmd) + + name, _ := cmd.Flags().GetString(nnsNameFlag) + w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath)) + commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err) + h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash()) + + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "set admin error: %w", err) + cmd.Println("Admin set successfully") +} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index 0eaa5ac58..e49f62256 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -1,25 +1,67 @@ package nns import ( + "errors" + client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/util" "github.com/spf13/cobra" "github.com/spf13/viper" ) -func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, util.Uint160) { +func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { v := viper.GetViper() - c, err := helper.GetN3Client(v) + c, err := helper.NewRemoteClient(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c) + alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag)) + walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath)) + adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath)) + + var ( + alphabet *helper.AlphabetWallets + regularWallets []*helper.RegularWallets + ) + + if alphabetWalletPath != "" { + alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName} + } + + if walletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath}) + } + + if adminWalletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath}) + } + + if alphabet == nil && regularWallets == nil { + commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided")) + } + + ac, err := helper.NewLocalActor(c, alphabet, regularWallets...) 
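+	// The actor is built from whichever wallets were resolved above: the alphabet wallet (if given) plus any regular (node, admin) wallets.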
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) r := management.NewReader(ac.Invoker) nnsCs, err := helper.GetContractByID(r, 1) commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) - return client.New(ac, nnsCs.Hash), ac, nnsCs.Hash + return client.New(ac, nnsCs.Hash), ac +} + +func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) { + c, err := helper.NewRemoteClient(viper.GetViper()) + commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) + + inv := invoker.New(c, nil) + r := management.NewReader(inv) + nnsCs, err := helper.GetContractByID(r, 1) + commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) + + return client.NewReader(inv, nnsCs.Hash), inv } diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 0e217eb61..9cb47356f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/spf13/cobra" ) @@ -20,6 +19,7 @@ func initAddRecordCmd() { addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag) @@ -29,7 +29,6 @@ func initAddRecordCmd() { func initGetRecordsCmd() { Cmd.AddCommand(getRecordsCmd) getRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - getRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) getRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) getRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) @@ -42,13 +41,28 @@ func initDelRecordsCmd() { delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) + delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) } +func initDelRecordCmd() { + Cmd.AddCommand(delRecordCmd) + delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) + delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) + + _ = 
cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) + _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) + _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag) +} + func addRecord(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) data, _ := cmd.Flags().GetString(nnsRecordDataFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) @@ -64,16 +78,16 @@ func addRecord(cmd *cobra.Command, _ []string) { } func getRecords(cmd *cobra.Command, _ []string) { - c, act, hash := getRPCClient(cmd) + c, inv := nnsReader(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) if recordType == "" { - sid, r, err := unwrap.SessionIterator(act.Invoker.Call(hash, "getAllRecords", name)) + sid, r, err := c.GetAllRecords(name) commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) defer func() { - _ = act.Invoker.TerminateSession(sid) + _ = inv.TerminateSession(sid) }() - items, err := act.Invoker.TraverseIterator(sid, &r, 0) + items, err := inv.TraverseIterator(sid, &r, 0) commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) for len(items) != 0 { for j := range items { @@ -84,7 +98,7 @@ func getRecords(cmd *cobra.Command, _ []string) { recordTypeToString(nns.RecordType(rs[1].Value().(*big.Int).Int64())), string(bs)) } - items, err = act.Invoker.TraverseIterator(sid, &r, 0) + items, err = inv.TraverseIterator(sid, &r, 0) commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) } } else { @@ -101,7 +115,7 @@ func getRecords(cmd *cobra.Command, _ []string) { } func delRecords(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) typ, err := getRecordType(recordType) @@ -115,6 +129,22 @@ func delRecords(cmd *cobra.Command, _ []string) { cmd.Println("Records removed successfully") } +func delRecord(cmd *cobra.Command, _ []string) { + c, actor := nnsWriter(cmd) + name, _ := cmd.Flags().GetString(nnsNameFlag) + data, _ := cmd.Flags().GetString(nnsRecordDataFlag) + recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) + typ, err := getRecordType(recordType) + commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err) + h, vub, err := c.DeleteRecord(name, typ, data) + commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err) + + cmd.Println("Waiting for transaction to persist...") + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "delete record error: %w", err) + cmd.Println("Record removed successfully") +} + func getRecordType(recordType string) (*big.Int, error) { switch strings.ToUpper(recordType) { case "A": diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/register.go b/cmd/frostfs-adm/internal/modules/morph/nns/register.go deleted file mode 100644 index d05d9f171..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/register.go +++ /dev/null @@ -1,44 +0,0 @@ -package nns - -import ( - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -func initRegisterCmd() { - Cmd.AddCommand(registerCmd) - registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", 
commonflags.EndpointFlagDesc) - registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email") - registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter") - registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") - registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") - registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") - - _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) -} - -func registerDomain(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) - - name, _ := cmd.Flags().GetString(nnsNameFlag) - email, _ := cmd.Flags().GetString(nnsEmailFlag) - refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag) - retry, _ := cmd.Flags().GetInt64(nnsRetryFlag) - expire, _ := cmd.Flags().GetInt64(nnsExpireFlag) - ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag) - - h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh), - big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl)) - commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err) - - cmd.Println("Waiting for transaction to persist...") - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "register domain error: %w", err) - cmd.Println("Domain registered successfully") -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go index b13092240..53bd943f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go @@ -14,7 +14,7 @@ func initRenewCmd() { } func renewDomain(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) h, vub, err := c.Renew(name) commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index e528e4b7b..bb84933c6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -39,9 +39,20 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: registerDomain, } + deleteCmd = &cobra.Command{ + Use: "delete", + Short: "Delete a domain by name", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) + }, + Run: deleteDomain, + } renewCmd = &cobra.Command{ Use: "renew", Short: "Increases domain expiration date", @@ -66,6 +77,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, 
cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: addRecord, } @@ -83,17 +95,42 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecords, } + delRecordCmd = &cobra.Command{ + Use: "delete-record", + Short: "Removes domain record with the specified type and data", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) + }, + Run: delRecord, + } + setAdminCmd = &cobra.Command{ + Use: "set-admin", + Short: "Sets admin for domain", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) + _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath)) + }, + Run: setAdmin, + } ) func init() { initTokensCmd() initRegisterCmd() + initDeleteCmd() initRenewCmd() initUpdateCmd() initAddRecordCmd() initGetRecordsCmd() initDelRecordsCmd() + initDelRecordCmd() + initSetAdminCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go index 6e8ffb40a..4ccbb1677 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go @@ -1,24 +1,65 @@ package nns import ( + "math/big" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/spf13/cobra" ) +const ( + verboseDesc = "Include additional information about CNAME record." 
+) + func initTokensCmd() { Cmd.AddCommand(tokensCmd) tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc) } func listTokens(cmd *cobra.Command, _ []string) { - c, _, _ := getRPCClient(cmd) + c, _ := nnsReader(cmd) it, err := c.Tokens() commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err) for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) { for _, token := range toks { - cmd.Println(string(token)) + output := string(token) + if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose { + cname, err := getCnameRecord(c, token) + commonCmd.ExitOnErr(cmd, "", err) + if cname != "" { + output += " (CNAME: " + cname + ")" + } + } + cmd.Println(output) } } } + +func getCnameRecord(c *client.ContractReader, token []byte) (string, error) { + items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME))) + + // GetRecords returns the error "not an array" if the domain does not contain records. + if err != nil && strings.Contains(err.Error(), "not an array") { + return "", nil + } + + if err != nil { + return "", err + } + + if len(items) == 0 { + return "", nil + } + + record, err := items[0].TryBytes() + if err != nil { + return "", err + } + + return string(record), nil +} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/update.go b/cmd/frostfs-adm/internal/modules/morph/nns/update.go index 3437316e3..c6d77ead6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/update.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/update.go @@ -30,7 +30,7 @@ func initUpdateCmd() { } func updateSOA(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) email, _ := cmd.Flags().GetString(nnsEmailFlag) diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/node/remove.go index 0a19102ba..e47451e0c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go +++ b/cmd/frostfs-adm/internal/modules/morph/node/remove.go @@ -53,7 +53,7 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error { int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes()) } - if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil { + if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go index 9b213da4e..3435926c0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "math/big" - "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" @@ -41,7 +40,8 @@ func depositNotary(cmd *cobra.Command, _ []string) error { } accHash := w.GetChangeAddress() - if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil { + addr, _ := cmd.Flags().GetString(walletAccountFlag) + if addr != "" { accHash, err = address.StringToUint160(addr) if err != nil { return fmt.Errorf("invalid address: %s", addr) @@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) 
error { return fmt.Errorf("can't find account for %s", accHash) } - prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash)) + prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash)) pass, err := input.ReadPassword(prompt) if err != nil { return fmt.Errorf("can't get password: %v", err) @@ -73,23 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error { return err } - till := int64(defaultNotaryDepositLifetime) - tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag) - if err != nil { - return err - } - if tillStr != "" { - till, err = strconv.ParseInt(tillStr, 10, 64) - if err != nil || till <= 0 { - return errInvalidNotaryDepositLifetime - } + till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag) + if till <= 0 { + return errInvalidNotaryDepositLifetime } return transferGas(cmd, acc, accHash, gasAmount, till) } func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/root.go b/cmd/frostfs-adm/internal/modules/morph/notary/root.go index 497ff8ea1..d7be2e503 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/root.go @@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() { DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet") DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address") DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit") - DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks") + DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks") } func init() { diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go index 36547e22c..f2932e87c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go +++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go @@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error { } func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client:", err) inv := invoker.New(c, nil) @@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { buf := bytes.NewBuffer(nil) tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - _, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee))) - _, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte))) - _, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice))) + _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee)) + _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte)) + _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice)) _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go index cb575b657..24cda45a6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go @@ -20,23 +20,32 @@ 
const ( accountAddressFlag = "account" ) +func parseAddresses(cmd *cobra.Command) []util.Uint160 { + var addrs []util.Uint160 + + accs, _ := cmd.Flags().GetStringArray(accountAddressFlag) + for _, acc := range accs { + addr, err := address.StringToUint160(acc) + commonCmd.ExitOnErr(cmd, "invalid account: %w", err) + + addrs = append(addrs, addr) + } + return addrs +} + func addProxyAccount(cmd *cobra.Command, _ []string) { - acc, _ := cmd.Flags().GetString(accountAddressFlag) - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - err = processAccount(cmd, addr, "addAccount") + addrs := parseAddresses(cmd) + err := processAccount(cmd, addrs, "addAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } func removeProxyAccount(cmd *cobra.Command, _ []string) { - acc, _ := cmd.Flags().GetString(accountAddressFlag) - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - err = processAccount(cmd, addr, "removeAccount") + addrs := parseAddresses(cmd) + err := processAccount(cmd, addrs, "removeAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } -func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error { +func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error { wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) if err != nil { return fmt.Errorf("can't initialize context: %w", err) @@ -54,7 +63,9 @@ func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error } bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) + for _, addr := range addrs { + emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) + } if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go index 082bc57d1..ad89af2b5 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go @@ -29,12 +29,16 @@ var ( func initProxyAddAccount() { AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") + _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) + AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initProxyRemoveAccount() { RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") + _ = RemoveAccountCmd.MarkFlagRequired(accountAddressFlag) + RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func init() { diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go index 8595483ab..cc8225c7a 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -5,8 +5,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" @@ -40,7 +41,8 @@ func init() { rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(morph.RootCmd) - rootCmd.AddCommand(storagecfg.RootCmd) + rootCmd.AddCommand(metabase.RootCmd) + rootCmd.AddCommand(maintenance.RootCmd) rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go deleted file mode 100644 index 77183fb49..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/config.go +++ /dev/null @@ -1,137 +0,0 @@ -package storagecfg - -const configTemplate = `logger: - level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" - -node: - wallet: - path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented - address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented - password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented - addresses: # list of addresses announced by Storage node in the Network map - - {{ .AnnouncedAddress }} - attribute_0: UN-LOCODE:{{ .Attribute.Locode }} - relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map - -grpc: - num: 1 # total number of listener endpoints - 0: - endpoint: {{ .Endpoint }} # endpoint for gRPC server - tls:{{if .TLSCert}} - enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2) - certificate: {{ .TLSCert }} # path to TLS certificate - key: {{ .TLSKey }} # path to TLS key - {{- else }} - enabled: false # disable TLS for a gRPC connection - {{- end}} - -control: - authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service - {{- range .AuthorizedKeys }} - - {{.}}{{end}} - grpc: - endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service - -morph: - dial_timeout: 20s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # use TTL cache for side chain GET operations - rpc_endpoint: # side chain N3 RPC endpoints - {{- range .MorphRPC }} - - address: wss://{{.}}/ws{{end}} -{{if not .Relay }} -storage: - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations - - shard: - default: # section with the default shard parameters - metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) - - blobstor: - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) - depth: 2 # max depth of object tree storage in FS - small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes - compress: true # turn on/off Zstandard compression (level 3) of stored objects - compression_exclude_content_types: - - audio/* - - video/* - - blobovnicza: - size: 1073741824 # approximate size limit of single blobovnicza instance, 
total size will be: size*width^(depth+1), bytes - depth: 1 # max depth of object tree storage in key-value DB - width: 4 # max width of object tree storage in key-value DB - opened_cache_capacity: 50 # maximum number of opened database files - opened_cache_ttl: 5m # ttl for opened database file - opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - gc: - remover_batch_size: 200 # number of objects to be removed by the garbage collector - remover_sleep_interval: 5m # frequency of the garbage collector invocation - 0: - mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only" - - metabase: - path: {{ .MetabasePath }} # path to the metabase - - blobstor: - path: {{ .BlobstorPath }} # path to the blobstor -{{end}}` - -const ( - neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221" - balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55" - neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1" - balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf" -) - -var n3config = map[string]struct { - MorphRPC []string - RPC []string - NeoFSContract string - BalanceContract string -}{ - "testnet": { - MorphRPC: []string{ - "rpc01.morph.testnet.fs.neo.org:51331", - "rpc02.morph.testnet.fs.neo.org:51331", - "rpc03.morph.testnet.fs.neo.org:51331", - "rpc04.morph.testnet.fs.neo.org:51331", - "rpc05.morph.testnet.fs.neo.org:51331", - "rpc06.morph.testnet.fs.neo.org:51331", - "rpc07.morph.testnet.fs.neo.org:51331", - }, - RPC: []string{ - "rpc01.testnet.n3.nspcc.ru:21331", - "rpc02.testnet.n3.nspcc.ru:21331", - "rpc03.testnet.n3.nspcc.ru:21331", - "rpc04.testnet.n3.nspcc.ru:21331", - "rpc05.testnet.n3.nspcc.ru:21331", - "rpc06.testnet.n3.nspcc.ru:21331", - "rpc07.testnet.n3.nspcc.ru:21331", - }, - NeoFSContract: neofsTestnetAddress, - BalanceContract: balanceTestnetAddress, - }, - "mainnet": { - MorphRPC: []string{ - "rpc1.morph.fs.neo.org:40341", - "rpc2.morph.fs.neo.org:40341", - "rpc3.morph.fs.neo.org:40341", - "rpc4.morph.fs.neo.org:40341", - "rpc5.morph.fs.neo.org:40341", - "rpc6.morph.fs.neo.org:40341", - "rpc7.morph.fs.neo.org:40341", - }, - RPC: []string{ - "rpc1.n3.nspcc.ru:10331", - "rpc2.n3.nspcc.ru:10331", - "rpc3.n3.nspcc.ru:10331", - "rpc4.n3.nspcc.ru:10331", - "rpc5.n3.nspcc.ru:10331", - "rpc6.n3.nspcc.ru:10331", - "rpc7.n3.nspcc.ru:10331", - }, - NeoFSContract: neofsMainnetAddress, - BalanceContract: balanceMainnetAddress, - }, -} diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go deleted file mode 100644 index 127272da5..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go +++ /dev/null @@ -1,433 +0,0 @@ -package storagecfg - -import ( - "bytes" - "context" - "encoding/hex" - "errors" - "fmt" - "math/rand" - "net" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "text/template" - "time" - - netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "github.com/chzyer/readline" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" - 
"github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - - "github.com/spf13/cobra" -) - -const ( - walletFlag = "wallet" - accountFlag = "account" -) - -const ( - defaultControlEndpoint = "localhost:8090" - defaultDataEndpoint = "localhost" -) - -// RootCmd is a root command of config section. -var RootCmd = &cobra.Command{ - Use: "storage-config [-w wallet] [-a acccount] []", - Short: "Section for storage node configuration commands", - Run: storageConfig, -} - -func init() { - fs := RootCmd.Flags() - - fs.StringP(walletFlag, "w", "", "Path to wallet") - fs.StringP(accountFlag, "a", "", "Wallet account") -} - -type config struct { - AnnouncedAddress string - AuthorizedKeys []string - ControlEndpoint string - Endpoint string - TLSCert string - TLSKey string - MorphRPC []string - Attribute struct { - Locode string - } - Wallet struct { - Path string - Account string - Password string - } - Relay bool - BlobstorPath string - MetabasePath string -} - -func storageConfig(cmd *cobra.Command, args []string) { - outPath := getOutputPath(args) - - historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history") - readline.SetHistoryPath(historyPath) - - var c config - - c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag) - if c.Wallet.Path == "" { - c.Wallet.Path = getPath("Path to the storage node wallet: ") - } - - w, err := wallet.NewWalletFromFile(c.Wallet.Path) - fatalOnErr(err) - - fillWalletAccount(cmd, &c, w) - - accH, err := flags.ParseAddress(c.Wallet.Account) - fatalOnErr(err) - - acc := w.GetAccount(accH) - if acc == nil { - fatalOnErr(errors.New("can't find account in wallet")) - } - - c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account)) - fatalOnErr(err) - - err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams()) - fatalOnErr(err) - - c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes())) - - network := readNetwork(cmd) - - c.MorphRPC = n3config[network].MorphRPC - - depositGas(cmd, acc, network) - - c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ") - - endpoint := getDefaultEndpoint(cmd, &c) - c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint)) - if c.Endpoint == "" { - c.Endpoint = endpoint - } - - c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint)) - if c.ControlEndpoint == "" { - c.ControlEndpoint = defaultControlEndpoint - } - - c.TLSCert = getPath("TLS Certificate (optional): ") - if c.TLSCert != "" { - c.TLSKey = getPath("TLS Key: ") - } - - c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ") - if !c.Relay { - p := getPath("Path to the storage directory (all available storage will be used): ") - c.BlobstorPath = filepath.Join(p, "blob") - c.MetabasePath = filepath.Join(p, "meta") - } - - out := applyTemplate(c) - fatalOnErr(os.WriteFile(outPath, out, 0o644)) - - cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`") -} - -func getDefaultEndpoint(cmd *cobra.Command, c *config) string { - var addr, port string - for { - c.AnnouncedAddress = getString("Publicly announced address: ") - validator := netutil.Address{} - err := validator.FromString(c.AnnouncedAddress) - if err != nil { - cmd.Println("Incorrect address format. 
See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.") - continue - } - uriAddr, err := url.Parse(validator.URIAddr()) - if err != nil { - panic(fmt.Errorf("unexpected error: %w", err)) - } - addr = uriAddr.Hostname() - port = uriAddr.Port() - ip, err := net.ResolveIPAddr("ip", addr) - if err != nil { - cmd.Printf("Can't resolve IP address %s: %v\n", addr, err) - continue - } - - if !ip.IP.IsGlobalUnicast() { - cmd.Println("IP must be global unicast.") - continue - } - cmd.Printf("Resolved IP address: %s\n", ip.String()) - - _, err = strconv.ParseUint(port, 10, 16) - if err != nil { - cmd.Println("Port must be an integer.") - continue - } - - break - } - return net.JoinHostPort(defaultDataEndpoint, port) -} - -func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) { - c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag) - if c.Wallet.Account == "" { - addr := address.Uint160ToString(w.GetChangeAddress()) - c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr)) - if c.Wallet.Account == "" { - c.Wallet.Account = addr - } - } -} - -func readNetwork(cmd *cobra.Command) string { - var network string - for { - network = getString("Choose network [mainnet]/testnet: ") - switch network { - case "": - network = "mainnet" - case "testnet", "mainnet": - default: - cmd.Println(`Network must be either "mainnet" or "testnet"`) - continue - } - break - } - return network -} - -func getOutputPath(args []string) string { - if len(args) != 0 { - return args[0] - } - outPath := getPath("File to write config at [./config.yml]: ") - if outPath == "" { - outPath = "./config.yml" - } - return outPath -} - -func getWalletAccount(w *wallet.Wallet, prompt string) string { - addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts)) - for i := range w.Accounts { - addrs[i] = readline.PcItem(w.Accounts[i].Address) - } - - readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...)) - defer readline.SetAutoComplete(nil) - - s, err := readline.Line(prompt) - fatalOnErr(err) - return strings.TrimSpace(s) // autocompleter can return a string with a trailing space -} - -func getString(prompt string) string { - s, err := readline.Line(prompt) - fatalOnErr(err) - if s != "" { - _ = readline.AddHistory(s) - } - return s -} - -type filenameCompleter struct{} - -func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { - prefix := string(line[:pos]) - dir := filepath.Dir(prefix) - de, err := os.ReadDir(dir) - if err != nil { - return nil, 0 - } - - for i := range de { - name := filepath.Join(dir, de[i].Name()) - if strings.HasPrefix(name, prefix) { - tail := []rune(strings.TrimPrefix(name, prefix)) - if de[i].IsDir() { - tail = append(tail, filepath.Separator) - } - newLine = append(newLine, tail) - } - } - if pos != 0 { - return newLine, pos - len([]rune(dir)) - } - return newLine, 0 -} - -func getPath(prompt string) string { - readline.SetAutoComplete(filenameCompleter{}) - defer readline.SetAutoComplete(nil) - - p, err := readline.Line(prompt) - fatalOnErr(err) - - if p == "" { - return p - } - - _ = readline.AddHistory(p) - - abs, err := filepath.Abs(p) - if err != nil { - fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err)) - } - - return abs -} - -func getConfirmation(def bool, prompt string) bool { - for { - s, err := readline.Line(prompt) - fatalOnErr(err) - - switch strings.ToLower(s) { - case "y", "yes": - return true - case "n", "no": - return false - default: - if 
len(s) == 0 { - return def - } - } - } -} - -func applyTemplate(c config) []byte { - tmpl, err := template.New("config").Parse(configTemplate) - fatalOnErr(err) - - b := bytes.NewBuffer(nil) - fatalOnErr(tmpl.Execute(b, c)) - - return b.Bytes() -} - -func fatalOnErr(err error) { - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) { - sideClient := initClient(n3config[network].MorphRPC) - balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract) - - sideActor, err := actor.NewSimple(sideClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err)) - } - - sideGas := nep17.NewReader(sideActor, balanceHash) - accSH := acc.Contract.ScriptHash() - - balance, err := sideGas.BalanceOf(accSH) - if err != nil { - fatalOnErr(fmt.Errorf("side chain balance: %w", err)) - } - - ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ", - fixedn.ToString(balance, 12))) - if !ok { - return - } - - amountStr := getString("Enter amount in GAS: ") - amount, err := fixedn.FromString(amountStr, 8) - if err != nil { - fatalOnErr(fmt.Errorf("invalid amount: %w", err)) - } - - mainClient := initClient(n3config[network].RPC) - neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract) - - mainActor, err := actor.NewSimple(mainClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err)) - } - - mainGas := nep17.New(mainActor, gas.Hash) - - txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil) - if err != nil { - fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err)) - } - - cmd.Print("Waiting for transactions to persist.") - tick := time.NewTicker(time.Second / 2) - defer tick.Stop() - - timer := time.NewTimer(time.Second * 20) - defer timer.Stop() - - at := trigger.Application - -loop: - for { - select { - case <-tick.C: - _, err := mainClient.GetApplicationLog(txHash, &at) - if err == nil { - cmd.Print("\n") - break loop - } - cmd.Print(".") - case <-timer.C: - cmd.Printf("\nTimeout while waiting for transaction to persist.\n") - if getConfirmation(false, "Continue configuration? yes/[no]: ") { - return - } - os.Exit(1) - } - } -} - -func initClient(rpc []string) *rpcclient.Client { - var c *rpcclient.Client - var err error - - shuffled := make([]string, len(rpc)) - copy(shuffled, rpc) - rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) - - for _, endpoint := range shuffled { - c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{ - DialTimeout: time.Second * 2, - RequestTimeout: time.Second * 5, - }) - if err != nil { - continue - } - if err = c.Init(); err != nil { - continue - } - return c - } - - fatalOnErr(fmt.Errorf("can't create N3 client: %w", err)) - panic("unreachable") -} diff --git a/cmd/frostfs-cli/docs/sessions.md b/cmd/frostfs-cli/docs/sessions.md index 04563b7af..52c0e9b9b 100644 --- a/cmd/frostfs-cli/docs/sessions.md +++ b/cmd/frostfs-cli/docs/sessions.md @@ -72,4 +72,3 @@ All other `object` sub-commands support only static sessions (2). 
List of commands supporting sessions (static only): - `create` - `delete` -- `set-eacl` diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index a6d9968c5..299d0a830 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -2,19 +2,19 @@ package internal import ( "bytes" + "cmp" "context" "errors" "fmt" "io" - "sort" - "strings" + "os" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -76,13 +76,29 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain // SortedIDList returns sorted list of identifiers of user's containers. func (x ListContainersRes) SortedIDList() []cid.ID { list := x.cliRes.Containers() - sort.Slice(list, func(i, j int) bool { - lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString() - return strings.Compare(lhs, rhs) < 0 - }) + slices.SortFunc(list, cid.ID.Cmp) return list } +func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) { + cliPrm := &client.PrmContainerListStream{ + XHeaders: prm.XHeaders, + OwnerID: prm.OwnerID, + Session: prm.Session, + } + rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm) + if err != nil { + return fmt.Errorf("init container list: %w", err) + } + + err = rdr.Iterate(processCnr) + if err != nil { + return fmt.Errorf("read container list: %w", err) + } + + return +} + // PutContainerPrm groups parameters of PutContainer operation. type PutContainerPrm struct { Client *client.Client @@ -189,54 +205,6 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon return } -// EACLPrm groups parameters of EACL operation. -type EACLPrm struct { - Client *client.Client - ClientParams client.PrmContainerEACL -} - -// EACLRes groups the resulting values of EACL operation. -type EACLRes struct { - cliRes *client.ResContainerEACL -} - -// EACL returns requested eACL table. -func (x EACLRes) EACL() eacl.Table { - return x.cliRes.Table() -} - -// EACL reads eACL table from FrostFS by container ID. -// -// Returns any error which prevented the operation from completing correctly in error return. -func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) { - res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams) - - return -} - -// SetEACLPrm groups parameters of SetEACL operation. -type SetEACLPrm struct { - Client *client.Client - ClientParams client.PrmContainerSetEACL -} - -// SetEACLRes groups the resulting values of SetEACL operation. -type SetEACLRes struct{} - -// SetEACL requests to save an eACL table in FrostFS. -// -// Operation is asynchronous and no guaranteed even in the absence of errors. -// The required time is also not predictable. -// -// Success can be verified by reading by container identifier. -// -// Returns any error which prevented the operation from completing correctly in error return. 
-func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) { - _, err = prm.Client.ContainerSetEACL(ctx, prm.ClientParams) - - return -} - // NetworkInfoPrm groups parameters of NetworkInfo operation. type NetworkInfoPrm struct { Client *client.Client @@ -611,13 +579,6 @@ type HeadObjectPrm struct { commonObjectPrm objectAddressPrm rawPrm - - mainOnly bool -} - -// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API. -func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) { - x.mainOnly = v } // HeadObjectRes groups the resulting values of HeadObject operation. @@ -712,9 +673,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes for { n, ok = rdr.Read(buf) - for i := 0; i < n; i++ { - list = append(list, buf[i]) - } + list = append(list, buf[:n]...) if !ok { break } @@ -725,10 +684,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes return nil, fmt.Errorf("read object list: %w", err) } - sort.Slice(list, func(i, j int) bool { - lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString() - return strings.Compare(lhs, rhs) < 0 - }) + slices.SortFunc(list, oid.ID.Cmp) return &SearchObjectsRes{ ids: list, @@ -892,3 +848,71 @@ func SyncContainerSettings(ctx context.Context, prm SyncContainerPrm) (*SyncCont return new(SyncContainerRes), nil } + +// PatchObjectPrm groups parameters of PatchObject operation. +type PatchObjectPrm struct { + commonObjectPrm + objectAddressPrm + + NewAttributes []objectSDK.Attribute + + ReplaceAttribute bool + + NewSplitHeader *objectSDK.SplitHeader + + PayloadPatches []PayloadPatch +} + +type PayloadPatch struct { + Range objectSDK.Range + + PayloadPath string +} + +type PatchRes struct { + OID oid.ID +} + +func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) { + patchPrm := client.PrmObjectPatch{ + XHeaders: prm.xHeaders, + BearerToken: prm.bearerToken, + Session: prm.sessionToken, + Address: prm.objAddr, + } + + slices.SortFunc(prm.PayloadPatches, func(a, b PayloadPatch) int { + return cmp.Compare(a.Range.GetOffset(), b.Range.GetOffset()) + }) + + patcher, err := prm.cli.ObjectPatchInit(ctx, patchPrm) + if err != nil { + return nil, fmt.Errorf("init payload reading: %w", err) + } + + if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ + NewSplitHeader: prm.NewSplitHeader, + NewAttributes: prm.NewAttributes, + ReplaceAttributes: prm.ReplaceAttribute, + }) { + for _, pp := range prm.PayloadPatches { + payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) + if err != nil { + return nil, err + } + applied := patcher.PatchPayload(ctx, &pp.Range, payloadFile) + _ = payloadFile.Close() + if !applied { + break + } + } + } + + res, err := patcher.Close(ctx) + if err != nil { + return nil, err + } + return &PatchRes{ + OID: res.ObjectID(), + }, nil +} diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index f7c48b871..1eadfa2e1 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -56,8 +56,9 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, } 
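+			// grpc.WaitForReady(true) above makes RPCs block until the connection is ready instead of failing fast on transient connection errors.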
if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 { diff --git a/cmd/frostfs-cli/internal/common/tracing.go b/cmd/frostfs-cli/internal/common/tracing.go index 30c2f2b1a..10863ed1e 100644 --- a/cmd/frostfs-cli/internal/common/tracing.go +++ b/cmd/frostfs-cli/internal/common/tracing.go @@ -2,7 +2,7 @@ package common import ( "context" - "sort" + "slices" "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" @@ -45,15 +45,11 @@ func StartClientCommandSpan(cmd *cobra.Command) { }) commonCmd.ExitOnErr(cmd, "init tracing: %w", err) - var components sort.StringSlice + var components []string for c := cmd; c != nil; c = c.Parent() { components = append(components, c.Name()) } - for i, j := 0, len(components)-1; i < j; { - components.Swap(i, j) - i++ - j-- - } + slices.Reverse(components) operation := strings.Join(components, ".") ctx, span := tracing.StartSpanFromContext(cmd.Context(), operation) diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go index 88321176f..6ed21e107 100644 --- a/cmd/frostfs-cli/internal/commonflags/api.go +++ b/cmd/frostfs-cli/internal/commonflags/api.go @@ -9,7 +9,7 @@ const ( TTL = "ttl" TTLShorthand = "" TTLDefault = 2 - TTLUsage = "TTL value in request meta header" + TTLUsage = "The maximum number of intermediate nodes in the request route" XHeadersKey = "xhdr" XHeadersShorthand = "x" diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go index cd46d63eb..fad1f6183 100644 --- a/cmd/frostfs-cli/internal/commonflags/flags.go +++ b/cmd/frostfs-cli/internal/commonflags/flags.go @@ -28,7 +28,7 @@ const ( RPC = "rpc-endpoint" RPCShorthand = "r" RPCDefault = "" - RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')" + RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')" Timeout = "timeout" TimeoutShorthand = "t" diff --git a/cmd/frostfs-cli/internal/key/key_test.go b/cmd/frostfs-cli/internal/key/key_test.go index e3127a3fe..37e4fd4ee 100644 --- a/cmd/frostfs-cli/internal/key/key_test.go +++ b/cmd/frostfs-cli/internal/key/key_test.go @@ -24,6 +24,8 @@ var testCmd = &cobra.Command{ } func Test_getOrGenerate(t *testing.T) { + t.Cleanup(viper.Reset) + dir := t.TempDir() wallPath := filepath.Join(dir, "wallet.json") diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go index c6622da25..f4039283f 100644 --- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go @@ -1,45 +1,19 @@ package apemanager import ( - "encoding/hex" - "errors" + "fmt" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/spf13/cobra" ) -const ( - chainIDFlag = "chain-id" - chainIDHexFlag = "chain-id-hex" - 
ruleFlag = "rule" - pathFlag = "path" -) - -const ( - targetNameFlag = "target-name" - targetNameDesc = "Resource name in APE resource name format" - targetTypeFlag = "target-type" - targetTypeDesc = "Resource type(container/namespace)" -) - -const ( - defaultNamespace = "" - namespaceTarget = "namespace" - containerTarget = "container" - userTarget = "user" - groupTarget = "group" -) - -var errUnknownTargetType = errors.New("unknown target type") - var addCmd = &cobra.Command{ Use: "add", Short: "Add rule chain for a target", @@ -50,55 +24,28 @@ var addCmd = &cobra.Command{ } func parseTarget(cmd *cobra.Command) (ct apeSDK.ChainTarget) { - typ, _ := cmd.Flags().GetString(targetTypeFlag) - name, _ := cmd.Flags().GetString(targetNameFlag) + t := apeCmd.ParseTarget(cmd) - ct.Name = name + ct.Name = t.Name - switch typ { - case namespaceTarget: + switch t.Type { + case engine.Namespace: ct.TargetType = apeSDK.TargetTypeNamespace - case containerTarget: - var cnr cid.ID - commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) + case engine.Container: ct.TargetType = apeSDK.TargetTypeContainer - case userTarget: + case engine.User: ct.TargetType = apeSDK.TargetTypeUser - case groupTarget: + case engine.Group: ct.TargetType = apeSDK.TargetTypeGroup default: - commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) + commonCmd.ExitOnErr(cmd, "conversion error: %w", fmt.Errorf("unknown type '%c'", t.Type)) } return ct } func parseChain(cmd *cobra.Command) apeSDK.Chain { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - - chainIDRaw := []byte(chainID) - - if hexEncoded { - var err error - chainIDRaw, err = hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - } - - chain := new(apechain.Chain) - chain.ID = apechain.ID(chainIDRaw) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error: %w", errors.New("rule is not passed")) - } - - cmd.Println("Parsed chain:") - util.PrintHumanReadableAPEChain(cmd, chain) - - serialized := chain.Bytes() + c := apeCmd.ParseChain(cmd) + serialized := c.Bytes() return apeSDK.Chain{ Raw: serialized, } @@ -127,13 +74,13 @@ func initAddCmd() { commonflags.Init(addCmd) ff := addCmd.Flags() - ff.StringArray(ruleFlag, []string{}, "Rule statement") - ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format") - ff.String(chainIDFlag, "", "Assign ID to the parsed chain") - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = addCmd.MarkFlagRequired(targetTypeFlag) - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc) + ff.String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc) + ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = addCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc) - addCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag) + 
addCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag) } diff --git a/cmd/frostfs-cli/modules/ape_manager/list_chain.go b/cmd/frostfs-cli/modules/ape_manager/list_chain.go index a5dd44614..b07ecc52f 100644 --- a/cmd/frostfs-cli/modules/ape_manager/list_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/list_chain.go @@ -4,8 +4,8 @@ import ( internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" @@ -35,7 +35,7 @@ func list(cmd *cobra.Command, _ []string) { for _, respChain := range resp.Chains { var chain apechain.Chain commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(respChain.Raw)) - apeutil.PrintHumanReadableAPEChain(cmd, &chain) + apeCmd.PrintHumanReadableAPEChain(cmd, &chain) } } @@ -43,7 +43,7 @@ func initListCmd() { commonflags.Init(listCmd) ff := listCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = listCmd.MarkFlagRequired(targetTypeFlag) + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = listCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) } diff --git a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go index 179bd5c9e..136ca81c3 100644 --- a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go @@ -1,29 +1,23 @@ package apemanager import ( - "encoding/hex" - "errors" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" ) -var ( - errEmptyChainID = errors.New("chain id cannot be empty") - - removeCmd = &cobra.Command{ - Use: "remove", - Short: "Remove rule chain for a target", - Run: remove, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, - } -) +var removeCmd = &cobra.Command{ + Use: "remove", + Short: "Remove rule chain for a target", + Run: remove, + PersistentPreRun: func(cmd *cobra.Command, _ []string) { + commonflags.Bind(cmd) + }, +} func remove(cmd *cobra.Command, _ []string) { target := parseTarget(cmd) @@ -31,19 +25,9 @@ func remove(cmd *cobra.Command, _ []string) { key := key.Get(cmd) cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - chainID, _ := cmd.Flags().GetString(chainIDFlag) - if chainID == "" { - commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID) - } + chainID := apeCmd.ParseChainID(cmd) chainIDRaw := []byte(chainID) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - if hexEncoded { - var err 
error - chainIDRaw, err = hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - } - _, err := cli.APEManagerRemoveChain(cmd.Context(), client_sdk.PrmAPEManagerRemoveChain{ ChainTarget: target, ChainID: chainIDRaw, @@ -58,9 +42,10 @@ func initRemoveCmd() { commonflags.Init(removeCmd) ff := removeCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = removeCmd.MarkFlagRequired(targetTypeFlag) - ff.String(chainIDFlag, "", "Chain id") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = removeCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + _ = removeCmd.MarkFlagRequired(apeCmd.ChainIDFlag) + ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc) } diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go index a86506c37..0927788ba 100644 --- a/cmd/frostfs-cli/modules/bearer/create.go +++ b/cmd/frostfs-cli/modules/bearer/create.go @@ -44,6 +44,7 @@ is set to current epoch + n. _ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) + _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC)) }, } @@ -81,7 +82,7 @@ func createToken(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) if iatRelative || expRelative || nvbRelative { - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) if len(endpoint) == 0 { commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) } diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go index 482c0027e..9632061f1 100644 --- a/cmd/frostfs-cli/modules/bearer/generate_override.go +++ b/cmd/frostfs-cli/modules/bearer/generate_override.go @@ -1,31 +1,20 @@ package bearer import ( - "errors" "fmt" "os" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" ) -var ( - errChainIDCannotBeEmpty = errors.New("chain id cannot be empty") - errRuleIsNotParsed = errors.New("rule is not passed") -) - const ( - chainIDFlag = "chain-id" - chainIDHexFlag = "chain-id-hex" - ruleFlag = "rule" - pathFlag = "path" - outputFlag = "output" + outputFlag = "output" ) var generateAPEOverrideCmd = &cobra.Command{ @@ -40,7 +29,7 @@ Generated APE override can be dumped to a file in JSON format that is passed to } func genereateAPEOverride(cmd *cobra.Command, _ []string) { - c := parseChain(cmd) + c := apeCmd.ParseChain(cmd) targetCID, _ := cmd.Flags().GetString(commonflags.CIDFlag) var cid cidSDK.ID @@ -63,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) { 
outputPath, _ := cmd.Flags().GetString(outputFlag) if outputPath != "" { - err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644) + err := os.WriteFile(outputPath, overrideMarshalled, 0o644) commonCmd.ExitOnErr(cmd, "dump error: %w", err) } else { fmt.Print("\n") @@ -77,39 +66,11 @@ func init() { ff.StringP(commonflags.CIDFlag, "", "", "Target container ID.") _ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.CIDFlag) - ff.StringArray(ruleFlag, []string{}, "Rule statement") - ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format") - ff.String(chainIDFlag, "", "Assign ID to the parsed chain") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement") + ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format") + ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain") + ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") ff.String(outputFlag, "", "Output path to dump result JSON-encoded APE override") _ = cobra.MarkFlagFilename(createCmd.Flags(), outputFlag) } - -func parseChainID(cmd *cobra.Command) apechain.ID { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - if chainID == "" { - commonCmd.ExitOnErr(cmd, "read chain id error: %w", - errChainIDCannotBeEmpty) - } - return apechain.ID(chainID) -} - -func parseChain(cmd *cobra.Command) *apechain.Chain { - chain := new(apechain.Chain) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed) - } - - chain.ID = parseChainID(cmd) - - cmd.Println("Parsed chain:") - parseutil.PrintHumanReadableAPEChain(cmd, chain) - - return chain -} diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go index c6f576908..30f995180 100644 --- a/cmd/frostfs-cli/modules/container/create.go +++ b/cmd/frostfs-cli/modules/container/create.go @@ -7,22 +7,20 @@ import ( "strings" "time" - containerApi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + containerApi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" ) var ( - containerACL string containerPolicy string containerAttributes []string containerAwait bool @@ -89,9 +87,6 @@ It will be stored in sidechain when inner ring will accepts it.`, err = parseAttributes(&cnr, containerAttributes) commonCmd.ExitOnErr(cmd, "", err) - var basicACL acl.Basic - commonCmd.ExitOnErr(cmd, "decode basic ACL string: %w", 
basicACL.DecodeString(containerACL)) - tok := getSession(cmd) if tok != nil { @@ -105,7 +100,6 @@ It will be stored in sidechain when inner ring will accepts it.`, } cnr.SetPlacementPolicy(*placementPolicy) - cnr.SetBasicACL(basicACL) var syncContainerPrm internalclient.SyncContainerPrm syncContainerPrm.SetClient(cli) @@ -139,7 +133,7 @@ It will be stored in sidechain when inner ring will accepts it.`, }, } - for i := 0; i < awaitTimeout; i++ { + for range awaitTimeout { time.Sleep(1 * time.Second) _, err := internalclient.GetContainer(cmd.Context(), getPrm) @@ -163,10 +157,6 @@ func initContainerCreateCmd() { flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage) flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage) flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - - flags.StringVar(&containerACL, "basic-acl", acl.NamePrivate, fmt.Sprintf("HEX encoded basic ACL value or keywords like '%s', '%s', '%s'", - acl.NamePublicRW, acl.NamePrivate, acl.NamePublicROExtended, - )) flags.StringVarP(&containerPolicy, "policy", "p", "", "QL-encoded or JSON-encoded placement policy or path to file with it") flags.StringSliceVarP(&containerAttributes, "attributes", "a", nil, "Comma separated pairs of container attributes in form of Key1=Value1,Key2=Value2") flags.BoolVar(&containerAwait, "await", false, "Block execution until container is persisted") diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go index e5425bf25..c20188884 100644 --- a/cmd/frostfs-cli/modules/container/delete.go +++ b/cmd/frostfs-cli/modules/container/delete.go @@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`, }, } - for i := 0; i < awaitTimeout; i++ { + for range awaitTimeout { time.Sleep(1 * time.Second) _, err := internalclient.GetContainer(cmd.Context(), getPrm) diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go index 8c4ab14f8..fac6eb2cd 100644 --- a/cmd/frostfs-cli/modules/container/get.go +++ b/cmd/frostfs-cli/modules/container/get.go @@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod cmd.Println("created:", container.CreatedAt(cnr)) cmd.Println("attributes:") - cnr.IterateAttributes(func(key, val string) { + for key, val := range cnr.Attributes() { cmd.Printf("\t%s=%s\n", key, val) - }) + } cmd.Println("placement policy:") commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd))) diff --git a/cmd/frostfs-cli/modules/container/get_eacl.go b/cmd/frostfs-cli/modules/container/get_eacl.go deleted file mode 100644 index 4ed1c82e1..000000000 --- a/cmd/frostfs-cli/modules/container/get_eacl.go +++ /dev/null @@ -1,68 +0,0 @@ -package container - -import ( - "os" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "github.com/spf13/cobra" -) - -var getExtendedACLCmd = &cobra.Command{ - Use: 
"get-eacl", - Short: "Get extended ACL table of container", - Long: `Get extended ACL table of container`, - Run: func(cmd *cobra.Command, _ []string) { - id := parseContainerID(cmd) - pk := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - eaclPrm := internalclient.EACLPrm{ - Client: cli, - ClientParams: client.PrmContainerEACL{ - ContainerID: &id, - }, - } - - res, err := internalclient.EACL(cmd.Context(), eaclPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - eaclTable := res.EACL() - - if containerPathTo == "" { - cmd.Println("eACL: ") - common.PrettyPrintJSON(cmd, &eaclTable, "eACL") - - return - } - - var data []byte - - if containerJSON { - data, err = eaclTable.MarshalJSON() - commonCmd.ExitOnErr(cmd, "can't encode to JSON: %w", err) - } else { - data, err = eaclTable.Marshal() - commonCmd.ExitOnErr(cmd, "can't encode to binary: %w", err) - } - - cmd.Println("dumping data to file:", containerPathTo) - - err = os.WriteFile(containerPathTo, data, 0o644) - commonCmd.ExitOnErr(cmd, "could not write eACL to file: %w", err) - }, -} - -func initContainerGetEACLCmd() { - commonflags.Init(getExtendedACLCmd) - - flags := getExtendedACLCmd.Flags() - - flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.StringVar(&containerPathTo, "to", "", "Path to dump encoded container (default: binary encoded)") - flags.BoolVar(&containerJSON, commonflags.JSON, false, "Encode EACL table in json format") -} diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index a1410d7a0..e4a023d91 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -1,16 +1,16 @@ package container import ( - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // flags of list command. 
@@ -54,49 +54,60 @@ var listContainersCmd = &cobra.Command{ var prm internalclient.ListContainersPrm prm.SetClient(cli) - prm.Account = idUser - - res, err := internalclient.ListContainers(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - + prm.OwnerID = idUser prmGet := internalclient.GetContainerPrm{ Client: cli, } + var containerIDs []cid.ID + + err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool { + printContainer(cmd, prmGet, id) + return false + }) + if err == nil { + return + } + + if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented { + res, err := internalclient.ListContainers(cmd.Context(), prm) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + containerIDs = res.SortedIDList() + } else { + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + } - containerIDs := res.SortedIDList() for _, cnrID := range containerIDs { - if flagVarListName == "" && !flagVarListPrintAttr { - cmd.Println(cnrID.String()) - continue - } - - cnrID := cnrID - prmGet.ClientParams.ContainerID = &cnrID - res, err := internalclient.GetContainer(cmd.Context(), prmGet) - if err != nil { - cmd.Printf(" failed to read attributes: %v\n", err) - continue - } - - cnr := res.Container() - if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { - continue - } - cmd.Println(cnrID.String()) - - if flagVarListPrintAttr { - cnr.IterateAttributes(func(key, val string) { - if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) { - // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97 - // Use dedicated method to skip system attributes. - cmd.Printf(" %s: %s\n", key, val) - } - }) - } + printContainer(cmd, prmGet, cnrID) } }, } +func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) { + if flagVarListName == "" && !flagVarListPrintAttr { + cmd.Println(id.String()) + return + } + + prmGet.ClientParams.ContainerID = &id + res, err := internalclient.GetContainer(cmd.Context(), prmGet) + if err != nil { + cmd.Printf(" failed to read attributes: %v\n", err) + return + } + + cnr := res.Container() + if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { + return + } + cmd.Println(id.String()) + + if flagVarListPrintAttr { + for key, val := range cnr.Attributes() { + cmd.Printf(" %s: %s\n", key, val) + } + } +} + func initContainerListContainersCmd() { commonflags.Init(listContainersCmd) diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go index ff2f8cf45..d5850359d 100644 --- a/cmd/frostfs-cli/modules/container/list_objects.go +++ b/cmd/frostfs-cli/modules/container/list_objects.go @@ -1,9 +1,6 @@ package container import ( - "strings" - - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" @@ -67,14 +64,8 @@ var listContainerObjectsCmd = &cobra.Command{ resHead, err := internalclient.HeadObject(cmd.Context(), prmHead) if err == nil { - attrs := resHead.Header().Attributes() - for i := range attrs { - attrKey := attrs[i].Key() - if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, 
v2object.SysAttributePrefixNeoFS) { - // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97 - // Use dedicated method to skip system attributes. - cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value()) - } + for _, attr := range resHead.Header().UserAttributes() { + cmd.Printf(" %s: %s\n", attr.Key(), attr.Value()) } } else { cmd.Printf(" failed to read attributes: %v\n", err) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 095ab6438..cf4862b4a 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -1,12 +1,13 @@ package container import ( - "bufio" "encoding/hex" "encoding/json" + "errors" "fmt" - "io" + "maps" "os" + "slices" "strings" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -14,20 +15,22 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/chzyer/readline" "github.com/spf13/cobra" "github.com/spf13/viper" ) type policyPlaygroundREPL struct { - cmd *cobra.Command - nodes map[string]netmap.NodeInfo + cmd *cobra.Command + nodes map[string]netmap.NodeInfo + console *readline.Instance } -func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) { +func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { return &policyPlaygroundREPL{ cmd: cmd, nodes: map[string]netmap.NodeInfo{}, - }, nil + } } func (repl *policyPlaygroundREPL) handleLs(args []string) error { @@ -37,10 +40,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error { i := 1 for id, node := range repl.nodes { var attrs []string - node.IterateAttributes(func(k, v string) { + for k, v := range node.Attributes() { attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) - }) - fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) + } + fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) i++ } return nil } @@ -147,12 +150,29 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error { for _, node := range ns { ids = append(ids, hex.EncodeToString(node.PublicKey())) } - fmt.Printf("\t%2d: %v\n", i+1, ids) + fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) } return nil } +func (repl *policyPlaygroundREPL) handleHelp(args []string) error { + if len(args) != 0 { + if _, ok := commands[args[0]]; !ok { + return fmt.Errorf("unknown command: %q", args[0]) + } + fmt.Fprintln(repl.console, commands[args[0]].usage) + return nil + } + + commandList := slices.Collect(maps.Keys(commands)) + slices.Sort(commandList) + for _, command := range commandList { + fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].description) + } + return nil +} + func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { var nm netmap.NetMap var nodes []netmap.NodeInfo @@ -163,6 +183,105 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { return nm } +type commandDescription struct { + description string + usage string +} + +var commands = map[string]commandDescription{ + "list": { + description: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap +Example of usage: + list + 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} + 2: 
id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} +`, + }, + + "ls": { + description: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap +Example of usage: + ls + 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} + 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} +`, + }, + + "add": { + description: "Add a new node: add <node-hash> attr=value", + usage: `Add a new node +Example of usage: + add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`, + }, + + "load": { + description: "Load netmap from file: load <path>", + usage: `Load netmap from file +Example of usage: + load "netmap.json" +File format (netmap.json): +{ + "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": { + "continent": "Europe", + "country": "Poland" + }, + "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": { + "continent": "Antarctica", + "country": "Heard Island" + } +}`, + }, + + "remove": { + description: "Remove a node: remove <node-hash>", + usage: `Remove a node +Example of usage: + remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "rm": { + description: "Remove a node: rm <node-hash>", + usage: `Remove a node +Example of usage: + rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "eval": { + description: "Evaluate a policy: eval <policy>", + usage: `Evaluate a policy +Example of usage: + eval REP 2`, + }, + + "help": { + description: "Show available commands", + }, +} + +func (repl *policyPlaygroundREPL) handleCommand(args []string) error { + if len(args) == 0 { + return nil + } + + switch args[0] { + case "list", "ls": + return repl.handleLs(args[1:]) + case "add": + return repl.handleAdd(args[1:]) + case "load": + return repl.handleLoad(args[1:]) + case "remove", "rm": + return repl.handleRemove(args[1:]) + case "eval": + return repl.handleEval(args[1:]) + case "help": + return repl.handleHelp(args[1:]) + } + return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0]) +} + func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { key := key.GetOrGenerate(repl.cmd) @@ -180,36 +299,51 @@ func (repl *policyPlaygroundREPL) run() error { } } - cmdHandlers := map[string]func([]string) error{ - "list": repl.handleLs, - "ls": repl.handleLs, - "add": repl.handleAdd, - "load": repl.handleLoad, - "remove": repl.handleRemove, - "rm": repl.handleRemove, - "eval": repl.handleEval, + if len(viper.GetString(netmapConfigPath)) > 0 { + err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) + commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) } - for reader := bufio.NewReader(os.Stdin); ; { - fmt.Print("> ") - line, err := reader.ReadString('\n') + + var cfgCompleter []readline.PrefixCompleterInterface + var helpSubItems []readline.PrefixCompleterInterface + + for name := range commands { + if name != "help" { + cfgCompleter = append(cfgCompleter, readline.PcItem(name)) + helpSubItems = append(helpSubItems, readline.PcItem(name)) + } + } + + cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...)) + completer := readline.NewPrefixCompleter(cfgCompleter...) 
+ rl, err := readline.NewEx(&readline.Config{ + Prompt: "> ", + InterruptPrompt: "^C", + AutoComplete: completer, + }) + if err != nil { + return fmt.Errorf("error initializing readline: %w", err) + } + repl.console = rl + defer rl.Close() + + var exit bool + for { + line, err := rl.Readline() if err != nil { - if err == io.EOF { - return nil + if errors.Is(err, readline.ErrInterrupt) { + if exit { + return nil + } + exit = true + continue } - return fmt.Errorf("reading line: %v", err) + return fmt.Errorf("reading line: %w", err) } - parts := strings.Fields(line) - if len(parts) == 0 { - continue - } - cmd := parts[0] - handler, exists := cmdHandlers[cmd] - if exists { - if err := handler(parts[1:]); err != nil { - fmt.Printf("error: %v\n", err) - } - } else { - fmt.Printf("error: unknown command %q\n", cmd) + exit = false + + if err := repl.handleCommand(strings.Fields(line)); err != nil { + fmt.Fprintf(repl.console, "error: %v\n", err) } } } @@ -220,12 +354,19 @@ var policyPlaygroundCmd = &cobra.Command{ Long: `A REPL for testing placement policies. If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`, Run: func(cmd *cobra.Command, _ []string) { - repl, err := newPolicyPlaygroundREPL(cmd) - commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err) + repl := newPolicyPlaygroundREPL(cmd) commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run()) }, } +const ( + netmapConfigPath = "netmap-config" + netmapConfigUsage = "Path to the netmap configuration file" +) + func initContainerPolicyPlaygroundCmd() { commonflags.Init(policyPlaygroundCmd) + policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage) + + _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath)) } diff --git a/cmd/frostfs-cli/modules/container/root.go b/cmd/frostfs-cli/modules/container/root.go index 99d1a4231..2da21e767 100644 --- a/cmd/frostfs-cli/modules/container/root.go +++ b/cmd/frostfs-cli/modules/container/root.go @@ -25,8 +25,6 @@ func init() { deleteContainerCmd, listContainerObjectsCmd, getContainerInfoCmd, - getExtendedACLCmd, - setExtendedACLCmd, containerNodesCmd, policyPlaygroundCmd, } @@ -38,8 +36,6 @@ func init() { initContainerDeleteCmd() initContainerListObjectsCmd() initContainerInfoCmd() - initContainerGetEACLCmd() - initContainerSetEACLCmd() initContainerNodesCmd() initContainerPolicyPlaygroundCmd() @@ -53,7 +49,6 @@ func init() { }{ {createContainerCmd, "PUT"}, {deleteContainerCmd, "DELETE"}, - {setExtendedACLCmd, "SETEACL"}, } { commonflags.InitSession(el.cmd, "container "+el.verb) } diff --git a/cmd/frostfs-cli/modules/container/set_eacl.go b/cmd/frostfs-cli/modules/container/set_eacl.go deleted file mode 100644 index 86aa50a57..000000000 --- a/cmd/frostfs-cli/modules/container/set_eacl.go +++ /dev/null @@ -1,108 +0,0 @@ -package container - -import ( - "bytes" - "errors" - "time" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "github.com/spf13/cobra" -) - -var flagVarsSetEACL struct { - noPreCheck bool - - srcPath 
string -} - -var setExtendedACLCmd = &cobra.Command{ - Use: "set-eacl", - Short: "Set new extended ACL table for container", - Long: `Set new extended ACL table for container. -Container ID in EACL table will be substituted with ID from the CLI.`, - Run: func(cmd *cobra.Command, _ []string) { - id := parseContainerID(cmd) - eaclTable := common.ReadEACL(cmd, flagVarsSetEACL.srcPath) - - tok := getSession(cmd) - - eaclTable.SetCID(id) - - pk := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - if !flagVarsSetEACL.noPreCheck { - cmd.Println("Checking the ability to modify access rights in the container...") - - extendable, err := internalclient.IsACLExtendable(cmd.Context(), cli, id) - commonCmd.ExitOnErr(cmd, "Extensibility check failure: %w", err) - - if !extendable { - commonCmd.ExitOnErr(cmd, "", errors.New("container ACL is immutable")) - } - - cmd.Println("ACL extension is enabled in the container, continue processing.") - } - - setEACLPrm := internalclient.SetEACLPrm{ - Client: cli, - ClientParams: client.PrmContainerSetEACL{ - Table: eaclTable, - Session: tok, - }, - } - - _, err := internalclient.SetEACL(cmd.Context(), setEACLPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - if containerAwait { - exp, err := eaclTable.Marshal() - commonCmd.ExitOnErr(cmd, "broken EACL table: %w", err) - - cmd.Println("awaiting...") - - getEACLPrm := internalclient.EACLPrm{ - Client: cli, - ClientParams: client.PrmContainerEACL{ - ContainerID: &id, - }, - } - - for i := 0; i < awaitTimeout; i++ { - time.Sleep(1 * time.Second) - - res, err := internalclient.EACL(cmd.Context(), getEACLPrm) - if err == nil { - // compare binary values because EACL could have been set already - table := res.EACL() - got, err := table.Marshal() - if err != nil { - continue - } - - if bytes.Equal(exp, got) { - cmd.Println("EACL has been persisted on sidechain") - return - } - } - } - - commonCmd.ExitOnErr(cmd, "", errSetEACLTimeout) - } - }, -} - -func initContainerSetEACLCmd() { - commonflags.Init(setExtendedACLCmd) - - flags := setExtendedACLCmd.Flags() - flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.StringVar(&flagVarsSetEACL.srcPath, "table", "", "path to file with JSON or binary encoded EACL table") - flags.BoolVar(&containerAwait, "await", false, "block execution until EACL is persisted") - flags.BoolVar(&flagVarsSetEACL.noPreCheck, "no-precheck", false, "do not pre-check the extensibility of the container ACL") -} diff --git a/cmd/frostfs-cli/modules/container/util.go b/cmd/frostfs-cli/modules/container/util.go index 48265f785..4cb268ec5 100644 --- a/cmd/frostfs-cli/modules/container/util.go +++ b/cmd/frostfs-cli/modules/container/util.go @@ -18,9 +18,8 @@ const ( ) var ( - errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain") - errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain") - errSetEACLTimeout = errors.New("timeout: EACL has not been persisted on sidechain") + errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain") + errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain") ) func parseContainerID(cmd *cobra.Command) cid.ID { diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go index a22d0525d..42f229ad9 100644 --- a/cmd/frostfs-cli/modules/control/add_rule.go +++ b/cmd/frostfs-cli/modules/control/add_rule.go @@ -1,23 +1,14 @@ package 
control import ( - "encoding/hex" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) -const ( - ruleFlag = "rule" - pathFlag = "path" -) - var addRuleCmd = &cobra.Command{ Use: "add-rule", Short: "Add local override", @@ -31,41 +22,12 @@ control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid .. Run: addRule, } -func parseChain(cmd *cobra.Command) *apechain.Chain { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - - chainIDRaw := []byte(chainID) - - if hexEncoded { - var err error - chainIDRaw, err = hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - } - - chain := new(apechain.Chain) - chain.ID = apechain.ID(chainIDRaw) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed")) - } - - cmd.Println("Parsed chain:") - util.PrintHumanReadableAPEChain(cmd, chain) - - return chain -} - func addRule(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) target := parseTarget(cmd) - parsed := parseChain(cmd) + parsed := apeCmd.ParseChain(cmd) req := &control.AddChainLocalOverrideRequest{ Body: &control.AddChainLocalOverrideRequest_Body{ @@ -94,13 +56,13 @@ func initControlAddRuleCmd() { initControlFlags(addRuleCmd) ff := addRuleCmd.Flags() - ff.StringArray(ruleFlag, []string{}, "Rule statement") - ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format") - ff.String(chainIDFlag, "", "Assign ID to the parsed chain") - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = addRuleCmd.MarkFlagRequired(targetTypeFlag) - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement") + ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format") + ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain") + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = addRuleCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") - addRuleCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag) + addRuleCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag) } diff --git a/cmd/frostfs-cli/modules/control/detach_shards.go b/cmd/frostfs-cli/modules/control/detach_shards.go index 5e5b60c3d..025a6e561 100644 --- a/cmd/frostfs-cli/modules/control/detach_shards.go +++ b/cmd/frostfs-cli/modules/control/detach_shards.go @@ -1,10 +1,10 @@ package control import ( - rawclient 
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/doctor.go b/cmd/frostfs-cli/modules/control/doctor.go index 13bb81a0a..632cdd6a7 100644 --- a/cmd/frostfs-cli/modules/control/doctor.go +++ b/cmd/frostfs-cli/modules/control/doctor.go @@ -1,10 +1,10 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/drop_objects.go b/cmd/frostfs-cli/modules/control/drop_objects.go index 8c0bb2332..dcc1c1229 100644 --- a/cmd/frostfs-cli/modules/control/drop_objects.go +++ b/cmd/frostfs-cli/modules/control/drop_objects.go @@ -1,10 +1,10 @@ package control import ( - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go deleted file mode 100644 index 458e4cc0b..000000000 --- a/cmd/frostfs-cli/modules/control/evacuate_shard.go +++ /dev/null @@ -1,56 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "github.com/spf13/cobra" -) - -const ignoreErrorsFlag = "no-errors" - -var evacuateShardCmd = &cobra.Command{ - Use: "evacuate", - Short: "Evacuate objects from shard", - Long: "Evacuate objects from shard to other shards", - Run: evacuateShard, - Deprecated: "use frostfs-cli control shards evacuation start", -} - -func evacuateShard(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)} - req.Body.Shard_ID = getShardIDList(cmd) - req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.EvacuateShardResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.EvacuateShard(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount()) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard has successfully been evacuated.") -} - -func initControlEvacuateShardCmd() { - initControlFlags(evacuateShardCmd) - - flags := evacuateShardCmd.Flags() - flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - 
flags.Bool(shardAllFlag, false, "Process all shards") - flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects") - - evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index 6fa5ed75c..b8d7eb046 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -7,19 +7,24 @@ import ( "sync/atomic" "time" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" ) const ( - awaitFlag = "await" - noProgressFlag = "no-progress" - scopeFlag = "scope" + awaitFlag = "await" + noProgressFlag = "no-progress" + scopeFlag = "scope" + repOneOnlyFlag = "rep-one-only" + ignoreErrorsFlag = "no-errors" + + containerWorkerCountFlag = "container-worker-count" + objectWorkerCountFlag = "object-worker-count" scopeAll = "all" scopeObjects = "objects" @@ -64,12 +69,18 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag) + containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag) + objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag) + repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag) req := &control.StartShardEvacuationRequest{ Body: &control.StartShardEvacuationRequest_Body{ - Shard_ID: getShardIDList(cmd), - IgnoreErrors: ignoreErrors, - Scope: getEvacuationScope(cmd), + Shard_ID: getShardIDList(cmd), + IgnoreErrors: ignoreErrors, + Scope: getEvacuationScope(cmd), + ContainerWorkerCount: containerWorkerCount, + ObjectWorkerCount: objectWorkerCount, + RepOneOnly: repOneOnly, }, } @@ -285,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft leftMinutes := int(leftSeconds / 60) - sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes)) + fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) } func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -294,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR hour := int(duration.Seconds() / 3600) minute := int(duration.Seconds()/60) % 60 second := int(duration.Seconds()) % 60 - sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second)) + fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) } } func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if resp.GetBody().GetStartedAt() != nil { startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() - sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339))) + fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) } } func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if len(resp.GetBody().GetErrorMessage()) > 0 { - sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage())) + 
fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) } } @@ -321,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes default: status = "undefined" } - sb.WriteString(fmt.Sprintf(" Status: %s.", status)) + fmt.Fprintf(sb, " Status: %s.", status) } func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -339,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR } func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", + fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", resp.GetBody().GetEvacuatedObjects(), resp.GetBody().GetTotalObjects(), resp.GetBody().GetFailedObjects(), resp.GetBody().GetSkippedObjects(), resp.GetBody().GetEvacuatedTrees(), resp.GetBody().GetTotalTrees(), - resp.GetBody().GetFailedTrees())) + resp.GetBody().GetFailedTrees()) } func initControlEvacuationShardCmd() { @@ -371,6 +382,9 @@ func initControlStartEvacuationShardCmd() { flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll)) flags.Bool(awaitFlag, false, "Block execution until evacuation is completed") flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag)) + flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers") + flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers") + flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'") startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) } diff --git a/cmd/frostfs-cli/modules/control/flush_cache.go b/cmd/frostfs-cli/modules/control/flush_cache.go index 541961903..280aacfad 100644 --- a/cmd/frostfs-cli/modules/control/flush_cache.go +++ b/cmd/frostfs-cli/modules/control/flush_cache.go @@ -1,10 +1,10 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go index 050cf165c..4da903a9a 100644 --- a/cmd/frostfs-cli/modules/control/get_rule.go +++ b/cmd/frostfs-cli/modules/control/get_rule.go @@ -3,11 +3,11 @@ package control import ( "encoding/hex" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" ) @@ -24,8 +24,8 @@ func getRule(cmd *cobra.Command, _ []string) { 
target := parseTarget(cmd) - chainID, _ := cmd.Flags().GetString(chainIDFlag) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) + chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag) + hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag) if hexEncoded { chainIDBytes, err := hex.DecodeString(chainID) @@ -56,16 +56,16 @@ func getRule(cmd *cobra.Command, _ []string) { var chain apechain.Chain commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain())) - util.PrintHumanReadableAPEChain(cmd, &chain) + apecmd.PrintHumanReadableAPEChain(cmd, &chain) } func initControGetRuleCmd() { initControlFlags(getRuleCmd) ff := getRuleCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = getRuleCmd.MarkFlagRequired(targetTypeFlag) - ff.String(chainIDFlag, "", "Chain id") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc) + ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc) + _ = getRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag) + ff.String(apecmd.ChainIDFlag, "", "Chain id") + ff.Bool(apecmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") } diff --git a/cmd/frostfs-cli/modules/control/healthcheck.go b/cmd/frostfs-cli/modules/control/healthcheck.go index 2241a403f..1d4441f1e 100644 --- a/cmd/frostfs-cli/modules/control/healthcheck.go +++ b/cmd/frostfs-cli/modules/control/healthcheck.go @@ -3,11 +3,11 @@ package control import ( "os" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/ir_healthcheck.go b/cmd/frostfs-cli/modules/control/ir_healthcheck.go index 4f272c1b4..373f21c30 100644 --- a/cmd/frostfs-cli/modules/control/ir_healthcheck.go +++ b/cmd/frostfs-cli/modules/control/ir_healthcheck.go @@ -3,12 +3,12 @@ package control import ( "os" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/ir_remove_container.go b/cmd/frostfs-cli/modules/control/ir_remove_container.go index a66d7e06d..460e299e5 100644 --- a/cmd/frostfs-cli/modules/control/ir_remove_container.go +++ b/cmd/frostfs-cli/modules/control/ir_remove_container.go @@ -1,13 +1,13 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" diff --git a/cmd/frostfs-cli/modules/control/ir_remove_node.go b/cmd/frostfs-cli/modules/control/ir_remove_node.go index 412dc7934..2fe686d63 100644 --- a/cmd/frostfs-cli/modules/control/ir_remove_node.go +++ b/cmd/frostfs-cli/modules/control/ir_remove_node.go @@ -4,11 +4,11 @@ import ( "encoding/hex" "errors" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go index 6965b5dca..5f09e92c1 100644 --- a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go +++ b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go @@ -1,11 +1,11 @@ package control import ( - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go index f5fc27bda..a6c65d083 100644 --- a/cmd/frostfs-cli/modules/control/list_rules.go +++ b/cmd/frostfs-cli/modules/control/list_rules.go @@ -1,18 +1,16 @@ package control import ( - "errors" "fmt" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "github.com/nspcc-dev/neo-go/cli/input" + policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/spf13/cobra" ) @@ -23,65 +21,25 @@ var listRulesCmd = &cobra.Command{ Run: listRules, } -const ( - defaultNamespace = "root" - namespaceTarget = "namespace" - containerTarget = "container" - userTarget = "user" - groupTarget = "group" -) - -const ( - targetNameFlag = "target-name" - targetNameDesc = "Resource name in APE resource name format" - targetTypeFlag = "target-type" - 
targetTypeDesc = "Resource type(container/namespace)" -) - -var ( - errSettingDefaultValueWasDeclined = errors.New("setting default value was declined") - errUnknownTargetType = errors.New("unknown target type") -) +var engineToControlSvcType = map[policyengine.TargetType]control.ChainTarget_TargetType{ + policyengine.Namespace: control.ChainTarget_NAMESPACE, + policyengine.Container: control.ChainTarget_CONTAINER, + policyengine.User: control.ChainTarget_USER, + policyengine.Group: control.ChainTarget_GROUP, +} func parseTarget(cmd *cobra.Command) *control.ChainTarget { - typ, _ := cmd.Flags().GetString(targetTypeFlag) - name, _ := cmd.Flags().GetString(targetNameFlag) - switch typ { - case namespaceTarget: - if name == "" { - ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace)) - commonCmd.ExitOnErr(cmd, "read line error: %w", err) - ln = strings.ToLower(ln) - if len(ln) > 0 && (ln[0] == 'n') { - commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined) - } - name = defaultNamespace - } - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_NAMESPACE, - } - case containerTarget: - var cnr cid.ID - commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_CONTAINER, - } - case userTarget: - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_USER, - } - case groupTarget: - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_GROUP, - } - default: - commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) + target := apeCmd.ParseTarget(cmd) + + typ, ok := engineToControlSvcType[target.Type] + if !ok { + commonCmd.ExitOnErr(cmd, "%w", fmt.Errorf("unknown type '%c", target.Type)) + } + + return &control.ChainTarget{ + Name: target.Name, + Type: typ, } - return nil } func listRules(cmd *cobra.Command, _ []string) { @@ -117,7 +75,7 @@ func listRules(cmd *cobra.Command, _ []string) { for _, c := range chains { var chain apechain.Chain commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(c)) - util.PrintHumanReadableAPEChain(cmd, &chain) + apeCmd.PrintHumanReadableAPEChain(cmd, &chain) } } @@ -125,7 +83,7 @@ func initControlListRulesCmd() { initControlFlags(listRulesCmd) ff := listRulesCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = listRulesCmd.MarkFlagRequired(targetTypeFlag) + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = listRulesCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) } diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go index 6a988c355..3142d02e7 100644 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ b/cmd/frostfs-cli/modules/control/list_targets.go @@ -2,26 +2,20 @@ package control import ( "bytes" - "crypto/sha256" "fmt" "strconv" "text/tabwriter" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) -const ( - chainNameFlag = "chain-name" - chainNameFlagUsage = "Chain name(ingress|s3)" -) - var listTargetsCmd = &cobra.Command{ Use: "list-targets", Short: "List local targets", @@ -32,15 +26,11 @@ var listTargetsCmd = &cobra.Command{ func listTargets(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) - var cnr cid.ID - chainName, _ := cmd.Flags().GetString(chainNameFlag) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) + chainName := apeCmd.ParseChainName(cmd) req := &control.ListTargetsLocalOverridesRequest{ Body: &control.ListTargetsLocalOverridesRequest_Body{ - ChainName: chainName, + ChainName: string(chainName), }, } @@ -72,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) { tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) _, _ = tw.Write([]byte("#\tName\tType\n")) for i, t := range targets { - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())) } _ = tw.Flush() cmd.Print(buf.String()) @@ -82,7 +72,7 @@ func initControlListTargetsCmd() { initControlFlags(listTargetsCmd) ff := listTargetsCmd.Flags() - ff.String(chainNameFlag, "", chainNameFlagUsage) + ff.String(apeCmd.ChainNameFlag, "", apeCmd.ChainNameFlagDesc) - _ = cobra.MarkFlagRequired(ff, chainNameFlag) + _ = cobra.MarkFlagRequired(ff, apeCmd.ChainNameFlag) } diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go new file mode 100644 index 000000000..4cb4be539 --- /dev/null +++ b/cmd/frostfs-cli/modules/control/locate.go @@ -0,0 +1,117 @@ +package control + +import ( + "bytes" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/mr-tron/base58" + "github.com/spf13/cobra" +) + +const ( + FullInfoFlag = "full" + FullInfoFlagUsage = "Print full ShardInfo." +) + +var locateObjectCmd = &cobra.Command{ + Use: "locate-object", + Short: "List shards storing the object", + Long: "List shards storing the object", + Run: locateObject, +} + +func initControlLocateObjectCmd() { + initControlFlags(locateObjectCmd) + + flags := locateObjectCmd.Flags() + + flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag) + + flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag) + + flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. 
Requires --full flag.") + flags.Bool(FullInfoFlag, false, FullInfoFlagUsage) +} + +func locateObject(cmd *cobra.Command, _ []string) { + var cnr cid.ID + var obj oid.ID + + _ = object.ReadObjectAddress(cmd, &cnr, &obj) + + pk := key.Get(cmd) + + body := new(control.ListShardsForObjectRequest_Body) + body.SetContainerId(cnr.EncodeToString()) + body.SetObjectId(obj.EncodeToString()) + req := new(control.ListShardsForObjectRequest) + req.SetBody(body) + signRequest(cmd, pk, req) + + cli := getClient(cmd, pk) + + var err error + var resp *control.ListShardsForObjectResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + resp, err = control.ListShardsForObject(client, req) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) + + shardIDs := resp.GetBody().GetShard_ID() + + isFull, _ := cmd.Flags().GetBool(FullInfoFlag) + if !isFull { + for _, id := range shardIDs { + cmd.Println(base58.Encode(id)) + } + return + } + + // get full shard info + listShardsReq := new(control.ListShardsRequest) + listShardsReq.SetBody(new(control.ListShardsRequest_Body)) + signRequest(cmd, pk, listShardsReq) + var listShardsResp *control.ListShardsResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + listShardsResp, err = control.ListShards(client, listShardsReq) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody()) + + shards := listShardsResp.GetBody().GetShards() + sortShardsByID(shards) + shards = filterShards(shards, shardIDs) + + isJSON, _ := cmd.Flags().GetBool(commonflags.JSON) + if isJSON { + prettyPrintShardsJSON(cmd, shards) + } else { + prettyPrintShards(cmd, shards) + } +} + +func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo { + var res []control.ShardInfo + for _, id := range ids { + for _, inf := range info { + if bytes.Equal(inf.Shard_ID, id) { + res = append(res, inf) + } + } + } + return res +} diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go new file mode 100644 index 000000000..3df12a15d --- /dev/null +++ b/cmd/frostfs-cli/modules/control/rebuild_shards.go @@ -0,0 +1,88 @@ +package control + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "github.com/mr-tron/base58" + "github.com/spf13/cobra" +) + +const ( + fillPercentFlag = "fill_percent" +) + +var shardsRebuildCmd = &cobra.Command{ + Use: "rebuild", + Short: "Rebuild shards", + Long: "Rebuild reclaims storage occupied by dead objects and adjusts the storage structure according to the configuration (for blobovnicza only now)", + Run: shardsRebuild, +} + +func shardsRebuild(cmd *cobra.Command, _ []string) { + pk := key.Get(cmd) + + req := &control.StartShardRebuildRequest{ + Body: &control.StartShardRebuildRequest_Body{ + Shard_ID: getShardIDList(cmd), + TargetFillPercent: getFillPercentValue(cmd), + ConcurrencyLimit: getConcurrencyValue(cmd), + }, + } + + signRequest(cmd, pk, req) + + cli := getClient(cmd, pk) + + var resp *control.StartShardRebuildResponse + var err error + err = cli.ExecRaw(func(client *rawclient.Client) error { + resp, err = control.StartShardRebuild(client, req) + return 
err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) + + var success, failed uint + for _, res := range resp.GetBody().GetResults() { + if res.GetSuccess() { + success++ + cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID())) + } else { + failed++ + cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError()) + } + } + cmd.Printf("Total: %d success, %d failed\n", success, failed) +} + +func getFillPercentValue(cmd *cobra.Command) uint32 { + v, _ := cmd.Flags().GetUint32(fillPercentFlag) + if v <= 0 || v > 100 { + commonCmd.ExitOnErr(cmd, "invalid fill_percent value", fmt.Errorf("fill_percent value must be (0, 100], current value: %d", v)) + } + return v +} + +func getConcurrencyValue(cmd *cobra.Command) uint32 { + v, _ := cmd.Flags().GetUint32(concurrencyFlag) + if v <= 0 || v > 10000 { + commonCmd.ExitOnErr(cmd, "invalid concurrency value", fmt.Errorf("concurrency value must be (0, 10 000], current value: %d", v)) + } + return v +} + +func initControlShardRebuildCmd() { + initControlFlags(shardsRebuildCmd) + + flags := shardsRebuildCmd.Flags() + flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") + flags.Bool(shardAllFlag, false, "Process all shards") + flags.Uint32(fillPercentFlag, 80, "Target fill percent to reclaim space") + flags.Uint32(concurrencyFlag, 20, "Maximum count of concurrently rebuilding files") + setShardModeCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) +} diff --git a/cmd/frostfs-cli/modules/control/remove_rule.go b/cmd/frostfs-cli/modules/control/remove_rule.go index 4189ea76b..036317bcb 100644 --- a/cmd/frostfs-cli/modules/control/remove_rule.go +++ b/cmd/frostfs-cli/modules/control/remove_rule.go @@ -4,19 +4,14 @@ import ( "encoding/hex" "errors" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) -const ( - chainIDFlag = "chain-id" - chainIDHexFlag = "chain-id-hex" - allFlag = "all" -) - var ( errEmptyChainID = errors.New("chain id cannot be empty") @@ -30,8 +25,8 @@ var ( func removeRule(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - removeAll, _ := cmd.Flags().GetBool(allFlag) + hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag) + removeAll, _ := cmd.Flags().GetBool(apecmd.AllFlag) if removeAll { req := &control.RemoveChainLocalOverridesByTargetRequest{ Body: &control.RemoveChainLocalOverridesByTargetRequest_Body{ @@ -52,7 +47,7 @@ func removeRule(cmd *cobra.Command, _ []string) { return } - chainID, _ := cmd.Flags().GetString(chainIDFlag) + chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag) if chainID == "" { commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID) } @@ -92,11 +87,11 @@ func initControlRemoveRuleCmd() { initControlFlags(removeRuleCmd) ff := removeRuleCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = removeRuleCmd.MarkFlagRequired(targetTypeFlag) - ff.String(chainIDFlag, "", "Chain id") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") - 
ff.Bool(allFlag, false, "Remove all chains") - removeRuleCmd.MarkFlagsMutuallyExclusive(allFlag, chainIDFlag) + ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc) + ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc) + _ = removeRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag) + ff.String(apecmd.ChainIDFlag, "", apecmd.ChainIDFlagDesc) + ff.Bool(apecmd.ChainIDHexFlag, false, apecmd.ChainIDHexFlagDesc) + ff.Bool(apecmd.AllFlag, false, "Remove all chains") + removeRuleCmd.MarkFlagsMutuallyExclusive(apecmd.AllFlag, apecmd.ChainIDFlag) } diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go index b20d3618e..3abfe80cb 100644 --- a/cmd/frostfs-cli/modules/control/root.go +++ b/cmd/frostfs-cli/modules/control/root.go @@ -39,6 +39,7 @@ func init() { listRulesCmd, getRuleCmd, listTargetsCmd, + locateObjectCmd, ) initControlHealthCheckCmd() @@ -52,4 +53,5 @@ func init() { initControlListRulesCmd() initControGetRuleCmd() initControlListTargetsCmd() + initControlLocateObjectCmd() } diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go index 31ade1eb9..26a1ba883 100644 --- a/cmd/frostfs-cli/modules/control/set_netmap_status.go +++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go @@ -6,12 +6,12 @@ import ( "fmt" "time" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" ) @@ -84,7 +84,7 @@ func setNetmapStatus(cmd *cobra.Command, _ []string) { body.SetStatus(control.NetmapStatus_MAINTENANCE) if force { - body.SetForceMaintenance() + body.SetForceMaintenance(true) common.PrintVerbose(cmd, "Local maintenance will be forced.") } targetStatus = control.NetmapStatus_MAINTENANCE @@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client. 
var resp *control.GetNetmapStatusResponse var err error err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.GetNetmapStatus(client, req) + resp, err = control.GetNetmapStatus(cmd.Context(), client, req) return err }) commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err) diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go index d8198c426..3483f5d62 100644 --- a/cmd/frostfs-cli/modules/control/shards.go +++ b/cmd/frostfs-cli/modules/control/shards.go @@ -13,19 +13,19 @@ var shardsCmd = &cobra.Command{ func initControlShardsCmd() { shardsCmd.AddCommand(listShardsCmd) shardsCmd.AddCommand(setShardModeCmd) - shardsCmd.AddCommand(evacuateShardCmd) shardsCmd.AddCommand(evacuationShardCmd) shardsCmd.AddCommand(flushCacheCmd) shardsCmd.AddCommand(doctorCmd) shardsCmd.AddCommand(writecacheShardCmd) shardsCmd.AddCommand(shardsDetachCmd) + shardsCmd.AddCommand(shardsRebuildCmd) initControlShardsListCmd() initControlSetShardModeCmd() - initControlEvacuateShardCmd() initControlEvacuationShardCmd() initControlFlushCacheCmd() initControlDoctorCmd() initControlShardsWritecacheCmd() initControlShardsDetachCmd() + initControlShardRebuildCmd() } diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go index 07c5bcd9a..40d6628ee 100644 --- a/cmd/frostfs-cli/modules/control/shards_list.go +++ b/cmd/frostfs-cli/modules/control/shards_list.go @@ -7,11 +7,11 @@ import ( "sort" "strings" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/mr-tron/base58" "github.com/spf13/cobra" ) @@ -61,17 +61,18 @@ func listShards(cmd *cobra.Command, _ []string) { } } -func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) { +func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) { out := make([]map[string]any, 0, len(ii)) for _, i := range ii { out = append(out, map[string]any{ - "shard_id": base58.Encode(i.GetShard_ID()), - "mode": shardModeToString(i.GetMode()), - "metabase": i.GetMetabasePath(), - "blobstor": i.GetBlobstor(), - "writecache": i.GetWritecachePath(), - "pilorama": i.GetPiloramaPath(), - "error_count": i.GetErrorCount(), + "shard_id": base58.Encode(i.GetShard_ID()), + "mode": shardModeToString(i.GetMode()), + "metabase": i.GetMetabasePath(), + "blobstor": i.GetBlobstor(), + "writecache": i.GetWritecachePath(), + "pilorama": i.GetPiloramaPath(), + "error_count": i.GetErrorCount(), + "evacuation_in_progress": i.GetEvacuationInProgress(), }) } @@ -83,7 +84,7 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) { cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println } -func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) { +func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) { for _, i := range ii { pathPrinter := func(name, path string) string { if path == "" { @@ -105,7 +106,8 @@ func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) { sb.String()+ pathPrinter("Write-cache", i.GetWritecachePath())+ pathPrinter("Pilorama", i.GetPiloramaPath())+ - 
fmt.Sprintf("Error count: %d\n", i.GetErrorCount()), + fmt.Sprintf("Error count: %d\n", i.GetErrorCount())+ + fmt.Sprintf("Evacuation in progress: %t\n", i.GetEvacuationInProgress()), base58.Encode(i.GetShard_ID()), shardModeToString(i.GetMode()), ) @@ -121,7 +123,7 @@ func shardModeToString(m control.ShardMode) string { return "unknown" } -func sortShardsByID(ii []*control.ShardInfo) { +func sortShardsByID(ii []control.ShardInfo) { sort.Slice(ii, func(i, j int) bool { return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0 }) diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go index e73f15178..8fe01ba30 100644 --- a/cmd/frostfs-cli/modules/control/shards_set_mode.go +++ b/cmd/frostfs-cli/modules/control/shards_set_mode.go @@ -6,10 +6,10 @@ import ( "slices" "strings" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/mr-tron/base58" "github.com/spf13/cobra" ) @@ -117,10 +117,10 @@ func setShardMode(cmd *cobra.Command, _ []string) { req.SetBody(body) body.SetMode(mode) - body.SetShardIDList(getShardIDList(cmd)) + body.SetShard_ID(getShardIDList(cmd)) reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag) - body.ClearErrorCounter(reset) + body.SetResetErrorCounter(reset) signRequest(cmd, pk, req) diff --git a/cmd/frostfs-cli/modules/control/synchronize_tree.go b/cmd/frostfs-cli/modules/control/synchronize_tree.go index 5f2e4da96..1e4575f49 100644 --- a/cmd/frostfs-cli/modules/control/synchronize_tree.go +++ b/cmd/frostfs-cli/modules/control/synchronize_tree.go @@ -4,12 +4,12 @@ import ( "crypto/sha256" "errors" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/util.go b/cmd/frostfs-cli/modules/control/util.go index c0577ac0c..41d9dbf8a 100644 --- a/cmd/frostfs-cli/modules/control/util.go +++ b/cmd/frostfs-cli/modules/control/util.go @@ -4,11 +4,11 @@ import ( "crypto/ecdsa" "errors" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" "github.com/spf13/cobra" @@ -44,7 +44,7 @@ func verifyResponse(cmd *cobra.Command, GetSign() []byte }, body interface { - StableMarshal([]byte) []byte + 
MarshalProtobuf([]byte) []byte }, ) { if sigControl == nil { @@ -60,7 +60,7 @@ func verifyResponse(cmd *cobra.Command, var sig frostfscrypto.Signature commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2)) - if !sig.Verify(body.StableMarshal(nil)) { + if !sig.Verify(body.MarshalProtobuf(nil)) { commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature")) } } diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go index abc4ed2e6..d0c9a641b 100644 --- a/cmd/frostfs-cli/modules/control/writecache.go +++ b/cmd/frostfs-cli/modules/control/writecache.go @@ -1,14 +1,20 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/mr-tron/base58" "github.com/spf13/cobra" ) +const ( + asyncFlag = "async" + restoreModeFlag = "restore-mode" + shrinkFlag = "shrink" +) + var writecacheShardCmd = &cobra.Command{ Use: "writecache", Short: "Operations with storage node's write-cache", @@ -18,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{ var sealWritecacheShardCmd = &cobra.Command{ Use: "seal", Short: "Flush objects from write-cache and move write-cache to degraded read only mode.", - Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.", + Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.", Run: sealWritecache, } @@ -26,10 +32,16 @@ func sealWritecache(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag) + async, _ := cmd.Flags().GetBool(asyncFlag) + restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag) + shrink, _ := cmd.Flags().GetBool(shrinkFlag) req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{ Shard_ID: getShardIDList(cmd), IgnoreErrors: ignoreErrors, + Async: async, + RestoreMode: restoreMode, + Shrink: shrink, }} signRequest(cmd, pk, req) @@ -68,6 +80,9 @@ func initControlShardsWritecacheCmd() { ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") ff.Bool(shardAllFlag, false, "Process all shards") ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects") + ff.Bool(asyncFlag, false, "Run operation in background") + ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing") + ff.Bool(shrinkFlag, false, "Shrink writecache's internal storage") sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) } diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index b6ec48f35..5da66dcd9 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -49,24 +49,24 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("key:", hex.EncodeToString(i.PublicKey())) var stateWord string - switch { + switch i.Status() { default: stateWord = "" - case i.IsOnline(): + case netmap.Online: stateWord = "online" - case i.IsOffline(): + case netmap.Offline: stateWord = "offline" - case 
i.IsMaintenance(): + case netmap.Maintenance: stateWord = "maintenance" } cmd.Println("state:", stateWord) - netmap.IterateNetworkEndpoints(i, func(s string) { + for s := range i.NetworkEndpoints() { cmd.Println("address:", s) - }) + } - i.IterateAttributes(func(key, value string) { + for key, value := range i.Attributes() { cmd.Printf("attribute: %s=%s\n", key, value) - }) + } } diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go index e4e9cddb8..08a9ac4c8 100644 --- a/cmd/frostfs-cli/modules/object/delete.go +++ b/cmd/frostfs-cli/modules/object/delete.go @@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag)) } - objAddr = readObjectAddress(cmd, &cnr, &obj) + objAddr = ReadObjectAddress(cmd, &cnr, &obj) } pk := key.GetOrGenerate(cmd) diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go index f1edccba2..7312f5384 100644 --- a/cmd/frostfs-cli/modules/object/get.go +++ b/cmd/frostfs-cli/modules/object/get.go @@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) filename := cmd.Flag(fileFlag).Value.String() out, closer := createOutWriter(cmd, filename) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index 26243e7e7..25df375d4 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/spf13/cobra" @@ -42,7 +41,9 @@ func initObjectHashCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String("range", "", "Range to take hash from in the form offset1:length1,...") + flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...") + _ = objectHashCmd.MarkFlagRequired("range") + flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'") flags.String(getRangeHashSaltFlag, "", "Salt in hex format") } @@ -51,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) @@ -66,36 +67,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) { pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - tz := typ == hashTz - fullHash := len(ranges) == 0 - if fullHash { - var headPrm internalclient.HeadObjectPrm - headPrm.SetClient(cli) - Prepare(cmd, &headPrm) - headPrm.SetAddress(objAddr) - - // get hash of full payload through HEAD (may be user can do it through dedicated command?) 
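The node-info printer above moves from callback-style IterateNetworkEndpoints/IterateAttributes to plain range loops, which implies the SDK now exposes Go 1.23 iterator functions. A self-contained model of that shape, with nodeInfo standing in for the SDK type:

```go
package netmap

import (
	"fmt"
	"iter"
)

// nodeInfo models the iterator-based accessors the range loops above rely
// on, assuming iter.Seq / iter.Seq2 return types.
type nodeInfo struct {
	endpoints []string
	attrs     map[string]string
}

func (n nodeInfo) NetworkEndpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, e := range n.endpoints {
			if !yield(e) {
				return
			}
		}
	}
}

func (n nodeInfo) Attributes() iter.Seq2[string, string] {
	return func(yield func(string, string) bool) {
		for k, v := range n.attrs {
			if !yield(k, v) {
				return
			}
		}
	}
}

// printNode mirrors the new loops in prettyPrintNodeInfo.
func printNode(n nodeInfo) {
	for s := range n.NetworkEndpoints() {
		fmt.Println("address:", s)
	}
	for k, v := range n.Attributes() {
		fmt.Printf("attribute: %s=%s\n", k, v)
	}
}
```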
- res, err := internalclient.HeadObject(cmd.Context(), headPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - var cs checksum.Checksum - var csSet bool - - if tz { - cs, csSet = res.Header().PayloadHomomorphicHash() - } else { - cs, csSet = res.Header().PayloadChecksum() - } - - if csSet { - cmd.Println(hex.EncodeToString(cs.Value())) - } else { - cmd.Println("Missing checksum in object header.") - } - - return - } - var hashPrm internalclient.HashPayloadRangesPrm hashPrm.SetClient(cli) Prepare(cmd, &hashPrm) @@ -104,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { hashPrm.SetSalt(salt) hashPrm.SetRanges(ranges) - if tz { + if typ == hashTz { hashPrm.TZ() } diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go index 14797dc41..97e996cad 100644 --- a/cmd/frostfs-cli/modules/object/head.go +++ b/cmd/frostfs-cli/modules/object/head.go @@ -6,12 +6,12 @@ import ( "fmt" "os" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -38,7 +38,6 @@ func initObjectHeadCmd() { _ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag) flags.String(fileFlag, "", "File to write header to. 
Default: stdout.") - flags.Bool("main-only", false, "Return only main fields") flags.Bool(commonflags.JSON, false, "Marshal output in JSON") flags.Bool("proto", false, "Marshal output in Protobuf") flags.Bool(rawFlag, false, rawFlagDesc) @@ -48,8 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) - mainOnly, _ := cmd.Flags().GetBool("main-only") + objAddr := ReadObjectAddress(cmd, &cnr, &obj) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) @@ -62,7 +60,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { raw, _ := cmd.Flags().GetBool(rawFlag) prm.SetRawFlag(raw) prm.SetAddress(objAddr) - prm.SetMainOnlyFlag(mainOnly) res, err := internalclient.HeadObject(cmd.Context(), prm) if err != nil { diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go index d2e9af24c..d67db9f0d 100644 --- a/cmd/frostfs-cli/modules/object/lock.go +++ b/cmd/frostfs-cli/modules/object/lock.go @@ -7,17 +7,18 @@ import ( "strconv" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" + "github.com/spf13/viper" ) // object lock command. 
@@ -78,7 +79,7 @@ var objectLockCmd = &cobra.Command{ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index d04cf6f04..476238651 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -1,8 +1,6 @@ package object import ( - "bytes" - "cmp" "context" "crypto/ecdsa" "encoding/hex" @@ -30,7 +28,8 @@ import ( ) const ( - verifyPresenceAllFlag = "verify-presence-all" + verifyPresenceAllFlag = "verify-presence-all" + preferInternalAddressesFlag = "prefer-internal-addresses" ) var ( @@ -50,6 +49,12 @@ type ecHeader struct { parent oid.ID } +type objectCounter struct { + sync.Mutex + total uint32 + isECcounted bool +} + type objectPlacement struct { requiredNodes []netmapSDK.NodeInfo confirmedNodes []netmapSDK.NodeInfo @@ -58,6 +63,7 @@ type objectPlacement struct { type objectNodesResult struct { errors []error placements map[oid.ID]objectPlacement + total uint32 } type ObjNodesDataObject struct { @@ -97,28 +103,29 @@ func initObjectNodesCmd() { flags.Bool(verifyPresenceAllFlag, false, "Verify the actual presence of the object on all netmap nodes.") flags.Bool(commonflags.JSON, false, "Print information about the object placement as json.") + flags.Bool(preferInternalAddressesFlag, false, "Use internal addresses first to get object info.") } func objectNodes(cmd *cobra.Command, _ []string) { var cnrID cid.ID var objID oid.ID - readObjectAddress(cmd, &cnrID, &objID) + ReadObjectAddress(cmd, &cnrID, &objID) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - objects := getPhyObjects(cmd, cnrID, objID, cli, pk) + objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) - getActualPlacement(cmd, netmap, pk, objects, result) + getActualPlacement(cmd, netmap, pk, objects, count, result) printPlacement(cmd, objID, objects, result) } -func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject { +func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -146,7 +153,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C parent: res.Header().ECHeader().Parent(), } } - return []phyObject{obj} + return []phyObject{obj}, 1 } var errSplitInfo *objectSDK.SplitInfoError @@ -156,33 +163,39 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { - return getECObjectChunks(cmd, cnrID, objID, ecInfoError) + return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 } commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) - return nil + return nil, 0 } -func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject { - members := getCompexObjectMembers(cmd, 
cnrID, objID, cli, prmHead, errSplitInfo) - return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead) +func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { + members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) + return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total } -func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID { +func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { + var total int splitInfo := errSplitInfo.SplitInfo() - if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID, false); ok { - return members + if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { + if total = len(members); total > 0 { + total-- // linking object is not data object + } + return members, total } if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { - return members + return members, len(members) } - return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + return members, len(members) } func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { result := make([]phyObject, 0, len(members)) + var hasNonEC, hasEC bool var resultGuard sync.Mutex if len(members) == 0 { @@ -191,31 +204,8 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member prmHead.SetRawFlag(true) // to get an error instead of whole object - first := members[0] - var addrObj oid.Address - addrObj.SetContainer(cnrID) - addrObj.SetObject(first) - prmHead.SetAddress(addrObj) - - _, err := internalclient.HeadObject(cmd.Context(), prmHead) - var ecInfoError *objectSDK.ECInfoError - if errors.As(err, &ecInfoError) { - chunks := getECObjectChunks(cmd, cnrID, first, ecInfoError) - result = append(result, chunks...) - } else if err == nil { // not EC object, so all members must be phy objects - for _, member := range members { - result = append(result, phyObject{ - containerID: cnrID, - objectID: member, - }) - } - return result - } else { - commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", err) - } - eg, egCtx := errgroup.WithContext(cmd.Context()) - for idx := 1; idx < len(members); idx++ { + for idx := range members { partObjID := members[idx] eg.Go(func() error { @@ -225,24 +215,44 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member partAddr.SetObject(partObjID) partHeadPrm.SetAddress(partAddr) - _, err := internalclient.HeadObject(egCtx, partHeadPrm) - var ecInfoError *objectSDK.ECInfoError - if errors.As(err, &ecInfoError) { - chunks := getECObjectChunks(cmd, cnrID, partObjID, ecInfoError) - - resultGuard.Lock() - defer resultGuard.Unlock() - result = append(result, chunks...) 
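The flattening path now HEADs every member concurrently: an errgroup ties the goroutines to the command context and a mutex guards the shared result slice, while the `cand := cand` / `object := object` copies removed later in this diff reflect Go 1.22's per-iteration loop variables. The generic shape of that fan-out, with check standing in for the HEAD call:

```go
package object

import (
	"context"
	"sync"

	"golang.org/x/sync/errgroup"
)

// collectConcurrent runs one goroutine per item; the first error cancels
// the rest via the errgroup context, and a mutex protects the result slice.
func collectConcurrent[T any](ctx context.Context, items []T,
	check func(context.Context, T) (bool, error),
) ([]T, error) {
	var (
		mtx sync.Mutex
		res []T
	)
	eg, egCtx := errgroup.WithContext(ctx)
	for _, item := range items { // per-iteration variable since Go 1.22
		eg.Go(func() error {
			ok, err := check(egCtx, item)
			if err != nil || !ok {
				return err
			}
			mtx.Lock()
			defer mtx.Unlock()
			res = append(res, item)
			return nil
		})
	}
	return res, eg.Wait()
}
```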
- - return nil - } else if err == nil { - return errMalformedComplexObject + obj, err := internalclient.HeadObject(egCtx, partHeadPrm) + if err != nil { + var ecInfoError *objectSDK.ECInfoError + if errors.As(err, &ecInfoError) { + resultGuard.Lock() + defer resultGuard.Unlock() + result = append(result, getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)...) + hasEC = true + return nil + } + return err } - return err + + if obj.Header().Type() != objectSDK.TypeRegular { + commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", fmt.Errorf("object '%s' with type '%s' is not supported as part of complex object", partAddr, obj.Header().Type())) + } + + if len(obj.Header().Children()) > 0 { + // linking object is not data object, so skip it + return nil + } + + resultGuard.Lock() + defer resultGuard.Unlock() + result = append(result, phyObject{ + containerID: cnrID, + objectID: partObjID, + }) + hasNonEC = true + + return nil }) } commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", eg.Wait()) + if hasEC && hasNonEC { + commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", errMalformedComplexObject) + } return result } @@ -323,7 +333,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem } placementBuilder := placement.NewNetworkMapBuilder(netmap) for _, object := range objects { - placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err) for repIdx, rep := range placement { numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects() @@ -361,7 +371,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem placementObjectID = object.ecHeader.parent } placementBuilder := placement.NewNetworkMapBuilder(netmap) - placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err) for _, vector := range placement { @@ -386,15 +396,16 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem } } -func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) { +func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { resultMtx := &sync.Mutex{} + counter := &objectCounter{ + total: uint32(count), + } candidates := getNodesToCheckObjectExistance(cmd, netmap, result) eg, egCtx := errgroup.WithContext(cmd.Context()) for _, cand := range candidates { - cand := cand - eg.Go(func() error { cli, err := createClient(egCtx, cmd, cand, pk) if err != nil { @@ -405,9 +416,8 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. 
} for _, object := range objects { - object := object eg.Go(func() error { - stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk) + stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) resultMtx.Lock() defer resultMtx.Unlock() if err == nil && stored { @@ -426,6 +436,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. } commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) + result.total = counter.total } func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { @@ -449,11 +460,14 @@ func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) { var cli *client.Client var addresses []string - candidate.IterateNetworkEndpoints(func(s string) bool { - addresses = append(addresses, s) - return false - }) - addresses = append(addresses, candidate.ExternalAddresses()...) + if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) + addresses = append(addresses, candidate.ExternalAddresses()...) + } else { + addresses = append(addresses, candidate.ExternalAddresses()...) + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) + } + var lastErr error for _, address := range addresses { var networkAddr network.Address @@ -475,7 +489,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N return cli, nil } -func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) { +func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -490,6 +504,14 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, res, err := internalclient.HeadObject(ctx, prmHead) if err == nil && res != nil { + if res.Header().ECHeader() != nil { + counter.Lock() + defer counter.Unlock() + if !counter.isECcounted { + counter.total *= res.Header().ECHeader().Total() + } + counter.isECcounted = true + } return true, nil } var notFound *apistatus.ObjectNotFound @@ -501,7 +523,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, } func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - normilizeObjectNodesResult(objects, result) if json, _ := cmd.Flags().GetBool(commonflags.JSON); json { printObjectNodesAsJSON(cmd, objID, objects, result) } else { @@ -509,36 +530,9 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul } } -func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) { - slices.SortFunc(objects, func(lhs, rhs phyObject) int { - if lhs.ecHeader == nil && rhs.ecHeader == nil { - return bytes.Compare(lhs.objectID[:], rhs.objectID[:]) - } - if lhs.ecHeader == nil { - return -1 - } - if rhs.ecHeader == nil { - return 1 - } - if lhs.ecHeader.parent == rhs.ecHeader.parent { - return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index) - } - return 
bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:]) - }) - for _, obj := range objects { - op := result.placements[obj.objectID] - slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int { - return bytes.Compare(lhs.PublicKey(), rhs.PublicKey()) - }) - slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int { - return bytes.Compare(lhs.PublicKey(), rhs.PublicKey()) - }) - result.placements[obj.objectID] = op - } -} - func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects)) + fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total) + fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects)) for _, object := range objects { fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID) diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go new file mode 100644 index 000000000..ebbde76a2 --- /dev/null +++ b/cmd/frostfs-cli/modules/object/patch.go @@ -0,0 +1,174 @@ +package object + +import ( + "fmt" + "os" + "strconv" + "strings" + + internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +const ( + newAttrsFlagName = "new-attrs" + replaceAttrsFlagName = "replace-attrs" + rangeFlagName = "range" + payloadFlagName = "payload" + splitHeaderFlagName = "split-header" +) + +var objectPatchCmd = &cobra.Command{ + Use: "patch", + Run: patch, + Short: "Patch FrostFS object", + Long: "Patch FrostFS object. Each range passed to the command requires a corresponding patch payload.", + Example: ` +frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <cid> --oid <oid> --new-attrs 'key1=val1,key2=val2' --replace-attrs +frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <cid> --oid <oid> --range offX:lnX --payload /path/to/payloadX --range offY:lnY --payload /path/to/payloadY +frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <cid> --oid <oid> --new-attrs 'key1=val1,key2=val2' --replace-attrs --range offX:lnX --payload /path/to/payload +`, +} + +func initObjectPatchCmd() { + commonflags.Init(objectPatchCmd) + initFlagSession(objectPatchCmd, "PATCH") + + flags := objectPatchCmd.Flags() + + flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) + _ = objectPatchCmd.MarkFlagRequired(commonflags.CIDFlag) + + flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) + _ = objectPatchCmd.MarkFlagRequired(commonflags.OIDFlag) + + flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2") + flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") + flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. 
Format: offset:length") + flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") + flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header") +} + +func patch(cmd *cobra.Command, _ []string) { + var cnr cid.ID + var obj oid.ID + + objAddr := ReadObjectAddress(cmd, &cnr, &obj) + + ranges, err := getRangeSlice(cmd) + commonCmd.ExitOnErr(cmd, "", err) + + payloads := patchPayloadPaths(cmd) + + if len(ranges) != len(payloads) { + commonCmd.ExitOnErr(cmd, "", fmt.Errorf("the number of ranges and payloads are not equal: ranges = %d, payloads = %d", len(ranges), len(payloads))) + } + + newAttrs, err := parseNewObjectAttrs(cmd) + commonCmd.ExitOnErr(cmd, "can't parse new object attributes: %w", err) + replaceAttrs, _ := cmd.Flags().GetBool(replaceAttrsFlagName) + + pk := key.GetOrGenerate(cmd) + + cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) + + var prm internalclient.PatchObjectPrm + prm.SetClient(cli) + Prepare(cmd, &prm) + ReadOrOpenSession(cmd, &prm, pk, cnr, nil) + + prm.SetAddress(objAddr) + prm.NewAttributes = newAttrs + prm.ReplaceAttribute = replaceAttrs + + prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd) + + for i := range ranges { + prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{ + Range: ranges[i], + PayloadPath: payloads[i], + }) + } + + res, err := internalclient.Patch(cmd.Context(), prm) + if err != nil { + commonCmd.ExitOnErr(cmd, "can't patch the object: %w", err) + } + cmd.Println("Patched object ID: ", res.OID.EncodeToString()) +} + +func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { + rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName) + if err != nil { + return nil, err + } + + attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes + for i := range rawAttrs { + k, v, found := strings.Cut(rawAttrs[i], "=") + if !found { + return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i]) + } + attrs[i].SetKey(k) + attrs[i].SetValue(v) + } + return attrs, nil +} + +func getRangeSlice(cmd *cobra.Command) ([]objectSDK.Range, error) { + v, _ := cmd.Flags().GetStringSlice(rangeFlagName) + if len(v) == 0 { + return []objectSDK.Range{}, nil + } + rs := make([]objectSDK.Range, len(v)) + for i := range v { + before, after, found := strings.Cut(v[i], rangeSep) + if !found { + return nil, fmt.Errorf("invalid range specifier: %s", v[i]) + } + + offset, err := strconv.ParseUint(before, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid '%s' range offset specifier: %w", v[i], err) + } + length, err := strconv.ParseUint(after, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid '%s' range length specifier: %w", v[i], err) + } + + rs[i].SetOffset(offset) + rs[i].SetLength(length) + } + return rs, nil +} + +func patchPayloadPaths(cmd *cobra.Command) []string { + v, _ := cmd.Flags().GetStringSlice(payloadFlagName) + return v +} + +func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader { + path, _ := cmd.Flags().GetString(splitHeaderFlagName) + if path == "" { + return nil + } + + data, err := os.ReadFile(path) + commonCmd.ExitOnErr(cmd, "read file error: %w", err) + + splitHdrV2 := new(objectV2.SplitHeader) + err = splitHdrV2.Unmarshal(data) + if err != nil { + err = splitHdrV2.UnmarshalJSON(data) + commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err) + } + + return objectSDK.NewSplitHeaderFromV2(splitHdrV2) +} diff --git a/cmd/frostfs-cli/modules/object/put.go 
b/cmd/frostfs-cli/modules/object/put.go index 45e02edb3..9e8a7cc6f 100644 --- a/cmd/frostfs-cli/modules/object/put.go +++ b/cmd/frostfs-cli/modules/object/put.go @@ -10,11 +10,11 @@ import ( "strings" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -50,7 +50,7 @@ func initObjectPutCmd() { flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2") + flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2") flags.Bool("disable-filename", false, "Do not set well-known filename attribute") flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute") flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object") @@ -214,11 +214,9 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute { } func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { - var rawAttrs []string - - raw := cmd.Flag("attributes").Value.String() - if len(raw) != 0 { - rawAttrs = strings.Split(raw, ",") + rawAttrs, err := cmd.Flags().GetStringSlice("attributes") + if err != nil { + return nil, err } attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go index ad4bc3d59..6ec508ae2 100644 --- a/cmd/frostfs-cli/modules/object/range.go +++ b/cmd/frostfs-cli/modules/object/range.go @@ -38,7 +38,7 @@ func initObjectRangeCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String("range", "", "Range to take data from in the form offset:length") + flags.StringSlice("range", nil, "Range to take data from in the form offset:length") flags.String(fileFlag, "", "File to write object payload to. 
Default: stdout.") flags.Bool(rawFlag, false, rawFlagDesc) } @@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) @@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool { if ok { toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) toProto, _ := cmd.Flags().GetBool("proto") - if !(toJSON || toProto) { + if !toJSON && !toProto { cmd.PrintErrln("Object is erasure-encoded, ec information received.") } printECInfo(cmd, errECInfo.ECInfo()) @@ -195,11 +195,10 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) { } func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) { - v := cmd.Flag("range").Value.String() - if len(v) == 0 { - return nil, nil + vs, err := cmd.Flags().GetStringSlice("range") + if len(vs) == 0 || err != nil { + return nil, err } - vs := strings.Split(v, ",") rs := make([]objectSDK.Range, len(vs)) for i := range vs { before, after, found := strings.Cut(vs[i], rangeSep) diff --git a/cmd/frostfs-cli/modules/object/root.go b/cmd/frostfs-cli/modules/object/root.go index 7d8008b10..b808a509e 100644 --- a/cmd/frostfs-cli/modules/object/root.go +++ b/cmd/frostfs-cli/modules/object/root.go @@ -29,6 +29,7 @@ func init() { objectRangeCmd, objectLockCmd, objectNodesCmd, + objectPatchCmd, } Cmd.AddCommand(objectChildCommands...) @@ -39,6 +40,7 @@ func init() { } initObjectPutCmd() + initObjectPatchCmd() initObjectDeleteCmd() initObjectGetCmd() initObjectSearchCmd() diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go index 381c790e9..8e4e8b287 100644 --- a/cmd/frostfs-cli/modules/object/util.go +++ b/cmd/frostfs-cli/modules/object/util.go @@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string { return xs } -func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { +func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { readCID(cmd, cnr) readOID(cmd, obj) @@ -262,13 +262,8 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client if _, ok := dst.(*internal.DeleteObjectPrm); ok { common.PrintVerbose(cmd, "Collecting relatives of the removal object...") - rels := collectObjectRelatives(cmd, cli, cnr, *obj) - - if len(rels) == 0 { - objs = []oid.ID{*obj} - } else { - objs = append(rels, *obj) - } + objs = collectObjectRelatives(cmd, cli, cnr, *obj) + objs = append(objs, *obj) } } @@ -306,6 +301,8 @@ func finalizeSession(cmd *cobra.Command, dst SessionPrm, tok *session.Object, ke case *internal.PutObjectPrm: common.PrintVerbose(cmd, "Binding session to object PUT...") tok.ForVerb(session.VerbObjectPut) + case *internal.PatchObjectPrm: + tok.ForVerb(session.VerbObjectPatch) case *internal.DeleteObjectPrm: common.PrintVerbose(cmd, "Binding session to object DELETE...") tok.ForVerb(session.VerbObjectDelete) @@ -372,7 +369,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID, common.PrintVerbose(cmd, "Split information received - object is virtual.") splitInfo := errSplit.SplitInfo() - if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr, true); ok { + if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok { return members } @@ -388,7 +385,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr 
cid.ID, return nil } -func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID, withLinking bool) ([]oid.ID, bool) { +func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) { // collect split chain by the descending ease of operations (ease is evaluated heuristically). // If any approach fails, we don't try the next since we assume that it will fail too. @@ -409,10 +406,7 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK. common.PrintVerbose(cmd, "Received split members from the linking object: %v", children) - if withLinking { - return append(children, idLinking), true - } - return children, true + return append(children, idLinking), true } // linking object is not required for diff --git a/cmd/frostfs-cli/modules/root.go b/cmd/frostfs-cli/modules/root.go index 21c367d29..88acab341 100644 --- a/cmd/frostfs-cli/modules/root.go +++ b/cmd/frostfs-cli/modules/root.go @@ -21,7 +21,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc" - "github.com/mitchellh/go-homedir" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -112,14 +111,16 @@ func initConfig() { // Use config file from the flag. viper.SetConfigFile(cfgFile) } else { - // Find home directory. - home, err := homedir.Dir() - commonCmd.ExitOnErr(rootCmd, "", err) - - // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml" - viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli")) - viper.SetConfigName("config") - viper.SetConfigType("yaml") + // Find config directory. 
+ configDir, err := os.UserConfigDir() + if err != nil { + common.PrintVerbose(rootCmd, "Get config dir: %s", err) + } else { + // Search config in `$XDG_CONFIG_HOME/frostfs-cli/` with name "config.yaml" + viper.AddConfigPath(filepath.Join(configDir, "frostfs-cli")) + viper.SetConfigName("config") + viper.SetConfigType("yaml") + } } viper.SetEnvPrefix(envPrefix) diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go index 068b1d185..e2c05d486 100644 --- a/cmd/frostfs-cli/modules/tree/add.go +++ b/cmd/frostfs-cli/modules/tree/add.go @@ -30,8 +30,6 @@ func initAddCmd() { ff := addCmd.Flags() ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2") ff.Uint64(parentIDFlagKey, 0, "Parent node ID") - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func add(cmd *cobra.Command, _ []string) { @@ -47,9 +45,10 @@ func add(cmd *cobra.Command, _ []string) { meta, err := parseMeta(cmd) commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) @@ -77,13 +76,13 @@ func add(cmd *cobra.Command, _ []string) { cmd.Println("Node ID: ", resp.GetBody().GetNodeId()) } -func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) { +func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) { raws, _ := cmd.Flags().GetStringSlice(metaFlagKey) if len(raws) == 0 { return nil, nil } - pairs := make([]*tree.KeyValue, 0, len(raws)) + pairs := make([]tree.KeyValue, 0, len(raws)) for i := range raws { k, v, found := strings.Cut(raws[i], "=") if !found { @@ -94,7 +93,7 @@ func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) { pair.Key = k pair.Value = []byte(v) - pairs = append(pairs, &pair) + pairs = append(pairs, pair) } return pairs, nil diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go index ea815dbfe..7263bcd0d 100644 --- a/cmd/frostfs-cli/modules/tree/add_by_path.go +++ b/cmd/frostfs-cli/modules/tree/add_by_path.go @@ -36,7 +36,6 @@ func initAddByPathCmd() { ff.String(pathFlagKey, "", "Path to a node") ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2") - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) _ = cobra.MarkFlagRequired(ff, pathFlagKey) } @@ -50,9 +49,10 @@ func addByPath(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) tid, _ := cmd.Flags().GetString(treeIDFlagKey) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index 4f4f54657..d71a94b98 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -2,50 +2,67 @@ package tree import ( "context" - "strings" - "time" + "crypto/tls" + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing 
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) // _client returns grpc Tree service client. Should be removed // after making Tree API public. -func _client(ctx context.Context) (tree.TreeServiceClient, error) { +func _client() (tree.TreeServiceClient, error) { var netAddr network.Address - err := netAddr.FromString(viper.GetString(commonflags.RPC)) + + rpcEndpoint := viper.GetString(commonflags.RPC) + if rpcEndpoint == "" { + return nil, fmt.Errorf("%s is not defined", commonflags.RPC) + } + + err := netAddr.FromString(rpcEndpoint) if err != nil { return nil, err } + host, isTLS, err := client.ParseURI(netAddr.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + opts := []grpc.DialOption{ - grpc.WithBlock(), grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), ), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), + grpc.WithTransportCredentials(creds), } - if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - // a default connection establishing timeout - const defaultClientConnectTimeout = time.Second * 2 - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...) - cancel() - + cc, err := grpc.NewClient(host, opts...) 
return tree.NewTreeServiceClient(cc), err } + +func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) { + if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 { + common.PrintVerbose(cmd, "Set request timeout to %s.", timeout) + return context.WithTimeout(cmd.Context(), timeout) + } + return context.WithTimeout(cmd.Context(), commonflags.TimeoutDefault) +} diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go index f239066cd..210630e60 100644 --- a/cmd/frostfs-cli/modules/tree/get_by_path.go +++ b/cmd/frostfs-cli/modules/tree/get_by_path.go @@ -36,8 +36,6 @@ func initGetByPathCmd() { ff.String(pathFlagKey, "", "Path to a node") ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node") - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func getByPath(cmd *cobra.Command, _ []string) { @@ -50,9 +48,10 @@ func getByPath(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) tid, _ := cmd.Flags().GetString(treeIDFlagKey) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go index b1b307f62..9d767ab3e 100644 --- a/cmd/frostfs-cli/modules/tree/get_op_log.go +++ b/cmd/frostfs-cli/modules/tree/get_op_log.go @@ -30,8 +30,6 @@ func initGetOpLogCmd() { ff := getOpLogCmd.Flags() ff.Uint64(heightFlagKey, 0, "Height to start with") ff.Uint64(countFlagKey, 10, "Logged operations count") - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func getOpLog(cmd *cobra.Command, _ []string) { @@ -44,9 +42,10 @@ func getOpLog(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) tid, _ := cmd.Flags().GetString(treeIDFlagKey) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go index f0506467e..c581b8e26 100644 --- a/cmd/frostfs-cli/modules/tree/healthcheck.go +++ b/cmd/frostfs-cli/modules/tree/healthcheck.go @@ -20,15 +20,14 @@ var healthcheckCmd = &cobra.Command{ func initHealthcheckCmd() { commonflags.Init(healthcheckCmd) - ff := healthcheckCmd.Flags() - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func healthcheck(cmd *cobra.Command, _ []string) { pk := key.GetOrGenerate(cmd) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) req := &tree.HealthcheckRequest{ diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go index a25d066d5..ee1db2a79 100644 --- a/cmd/frostfs-cli/modules/tree/list.go +++ b/cmd/frostfs-cli/modules/tree/list.go @@ -26,8 +26,6 @@ func initListCmd() { ff := listCmd.Flags() ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) _ = listCmd.MarkFlagRequired(commonflags.CIDFlag) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func list(cmd *cobra.Command, _ []string) { @@ -38,9 +36,10 @@ func list(cmd *cobra.Command, _ []string) { err := cnr.DecodeString(cidString) 
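Each tree subcommand now swaps the unbounded cmd.Context() for contextWithTimeout, defined above. A minimal sketch of the same helper in isolation, assuming a timeout duration registered with viper (the CLI's actual default lives in commonflags.TimeoutDefault; the 15-second value here is illustrative):

package main

import (
	"context"
	"time"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// Stand-in for commonflags.TimeoutDefault (assumed value).
const timeoutDefault = 15 * time.Second

func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) {
	// A missing or zero duration falls back to the default, so every RPC
	// runs under some bounded deadline.
	if timeout := viper.GetDuration("timeout"); timeout > 0 {
		return context.WithTimeout(cmd.Context(), timeout)
	}
	return context.WithTimeout(cmd.Context(), timeoutDefault)
}

Callers must defer the returned cancel function, as every rewritten subcommand does; otherwise the timer leaks.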
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go index 95516940c..7a369bd02 100644 --- a/cmd/frostfs-cli/modules/tree/move.go +++ b/cmd/frostfs-cli/modules/tree/move.go @@ -33,8 +33,6 @@ func initMoveCmd() { _ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey) _ = getSubtreeCmd.MarkFlagRequired(parentIDFlagKey) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func move(cmd *cobra.Command, _ []string) { @@ -45,9 +43,10 @@ func move(cmd *cobra.Command, _ []string) { err := cnr.DecodeString(cidString) commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) @@ -75,7 +74,7 @@ func move(cmd *cobra.Command, _ []string) { resp, err := cli.GetSubTree(ctx, subTreeReq) commonCmd.ExitOnErr(cmd, "rpc call: %w", err) - var meta []*tree.KeyValue + var meta []tree.KeyValue subtreeResp, err := resp.Recv() for ; err == nil; subtreeResp, err = resp.Recv() { meta = subtreeResp.GetBody().GetMeta() diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go index 74e9d9749..3c532fe26 100644 --- a/cmd/frostfs-cli/modules/tree/remove.go +++ b/cmd/frostfs-cli/modules/tree/remove.go @@ -29,8 +29,6 @@ func initRemoveCmd() { ff.Uint64(nodeIDFlagKey, 0, "Node ID.") _ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func remove(cmd *cobra.Command, _ []string) { @@ -41,9 +39,10 @@ func remove(cmd *cobra.Command, _ []string) { err := cnr.DecodeString(cidString) commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - ctx := cmd.Context() + ctx, cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) diff --git a/cmd/frostfs-cli/modules/tree/root.go b/cmd/frostfs-cli/modules/tree/root.go index efd1c08b5..5a53c50d6 100644 --- a/cmd/frostfs-cli/modules/tree/root.go +++ b/cmd/frostfs-cli/modules/tree/root.go @@ -49,6 +49,7 @@ const ( heightFlagKey = "height" countFlagKey = "count" depthFlagKey = "depth" + orderFlagKey = "ordered" ) func initCTID(cmd *cobra.Command) { diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go index e58a13fd6..c5f7ad401 100644 --- a/cmd/frostfs-cli/modules/tree/subtree.go +++ b/cmd/frostfs-cli/modules/tree/subtree.go @@ -30,11 +30,10 @@ func initGetSubtreeCmd() { ff := getSubtreeCmd.Flags() ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.") ff.Uint32(depthFlagKey, 10, "Traversal depth.") + ff.Bool(orderFlagKey, false, "Sort output by ascending FileName.") _ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag) _ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func getSubTree(cmd *cobra.Command, _ []string) { @@ -45,9 +44,10 @@ func getSubTree(cmd *cobra.Command, _ []string) { err := cnr.DecodeString(cidString) commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - ctx := cmd.Context() + ctx, 
cancel := contextWithTimeout(cmd) + defer cancel() - cli, err := _client(ctx) + cli, err := _client() commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) rawCID := make([]byte, sha256.Size) @@ -59,6 +59,13 @@ func getSubTree(cmd *cobra.Command, _ []string) { depth, _ := cmd.Flags().GetUint32(depthFlagKey) + order, _ := cmd.Flags().GetBool(orderFlagKey) + + bodyOrder := tree.GetSubTreeRequest_Body_Order_None + if order { + bodyOrder = tree.GetSubTreeRequest_Body_Order_Asc + } + var bt []byte if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil { bt = t.Marshal() @@ -71,6 +78,9 @@ func getSubTree(cmd *cobra.Command, _ []string) { RootId: []uint64{rid}, Depth: depth, BearerToken: bt, + OrderBy: &tree.GetSubTreeRequest_Body_Order{ + Direction: bodyOrder, + }, }, } diff --git a/cmd/frostfs-cli/modules/util/acl.go b/cmd/frostfs-cli/modules/util/acl.go index 4c2e324b3..145dcc756 100644 --- a/cmd/frostfs-cli/modules/util/acl.go +++ b/cmd/frostfs-cli/modules/util/acl.go @@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) { fmt.Fprintln(w, strings.Join(bits, "\t")) // Footer footer := []string{"X F"} - for i := 0; i < 7; i++ { + for range 7 { footer = append(footer, "U S O B") } fmt.Fprintln(w, strings.Join(footer, "\t")) diff --git a/cmd/frostfs-cli/modules/util/convert_eacl.go b/cmd/frostfs-cli/modules/util/convert_eacl.go index d588ba35d..caa6dfcfe 100644 --- a/cmd/frostfs-cli/modules/util/convert_eacl.go +++ b/cmd/frostfs-cli/modules/util/convert_eacl.go @@ -6,7 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" + apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/util/locode.go b/cmd/frostfs-cli/modules/util/locode.go deleted file mode 100644 index a1f0f4d3f..000000000 --- a/cmd/frostfs-cli/modules/util/locode.go +++ /dev/null @@ -1,18 +0,0 @@ -package util - -import ( - "github.com/spf13/cobra" -) - -// locode section. 
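The new --ordered flag above changes a single field of the streamed GetSubTree request. A condensed fragment of an ordered traversal, reusing cli, ctx, rawCID, tid, rid, depth, and bt from getSubTree (the ContainerId and TreeId field names are assumed from the generated service types, since the hunk does not show them, and request signing is omitted):

subTreeReq := &tree.GetSubTreeRequest{
	Body: &tree.GetSubTreeRequest_Body{
		ContainerId: rawCID,
		TreeId:      tid,
		RootId:      []uint64{rid},
		Depth:       depth,
		BearerToken: bt,
		// Order_Asc asks the service to sort siblings by the FileName
		// attribute; Order_None keeps the server's natural traversal order.
		OrderBy: &tree.GetSubTreeRequest_Body_Order{
			Direction: tree.GetSubTreeRequest_Body_Order_Asc,
		},
	},
}

resp, err := cli.GetSubTree(ctx, subTreeReq)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)

// Drain the server-side stream; err is io.EOF after a successful pass.
msg, err := resp.Recv()
for ; err == nil; msg, err = resp.Recv() {
	cmd.Println(msg.GetBody().GetMeta())
}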
-var locodeCmd = &cobra.Command{ - Use: "locode", - Short: "Working with FrostFS UN/LOCODE database", -} - -func initLocodeCmd() { - locodeCmd.AddCommand(locodeGenerateCmd, locodeInfoCmd) - - initUtilLocodeInfoCmd() - initUtilLocodeGenerateCmd() -} diff --git a/cmd/frostfs-cli/modules/util/locode_generate.go b/cmd/frostfs-cli/modules/util/locode_generate.go deleted file mode 100644 index 319dee1c6..000000000 --- a/cmd/frostfs-cli/modules/util/locode_generate.go +++ /dev/null @@ -1,96 +0,0 @@ -package util - -import ( - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" - airportsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/airports" - locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb" - continentsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/continents/geojson" - csvlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/table/csv" - "github.com/spf13/cobra" -) - -type namesDB struct { - *airportsdb.DB - *csvlocode.Table -} - -const ( - locodeGenerateInputFlag = "in" - locodeGenerateSubDivFlag = "subdiv" - locodeGenerateAirportsFlag = "airports" - locodeGenerateCountriesFlag = "countries" - locodeGenerateContinentsFlag = "continents" - locodeGenerateOutputFlag = "out" -) - -var ( - locodeGenerateInPaths []string - locodeGenerateSubDivPath string - locodeGenerateAirportsPath string - locodeGenerateCountriesPath string - locodeGenerateContinentsPath string - locodeGenerateOutPath string - - locodeGenerateCmd = &cobra.Command{ - Use: "generate", - Short: "Generate UN/LOCODE database for FrostFS", - Run: func(cmd *cobra.Command, _ []string) { - locodeDB := csvlocode.New( - csvlocode.Prm{ - Path: locodeGenerateInPaths[0], - SubDivPath: locodeGenerateSubDivPath, - }, - csvlocode.WithExtraPaths(locodeGenerateInPaths[1:]...), - ) - - airportDB := airportsdb.New(airportsdb.Prm{ - AirportsPath: locodeGenerateAirportsPath, - CountriesPath: locodeGenerateCountriesPath, - }) - - continentsDB := continentsdb.New(continentsdb.Prm{ - Path: locodeGenerateContinentsPath, - }) - - targetDB := locodebolt.New(locodebolt.Prm{ - Path: locodeGenerateOutPath, - }) - - err := targetDB.Open() - commonCmd.ExitOnErr(cmd, "", err) - - defer targetDB.Close() - - names := &namesDB{ - DB: airportDB, - Table: locodeDB, - } - - err = locodedb.FillDatabase(locodeDB, airportDB, continentsDB, names, targetDB) - commonCmd.ExitOnErr(cmd, "", err) - }, - } -) - -func initUtilLocodeGenerateCmd() { - flags := locodeGenerateCmd.Flags() - - flags.StringSliceVar(&locodeGenerateInPaths, locodeGenerateInputFlag, nil, "List of paths to UN/LOCODE tables (csv)") - _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateInputFlag) - - flags.StringVar(&locodeGenerateSubDivPath, locodeGenerateSubDivFlag, "", "Path to UN/LOCODE subdivision database (csv)") - _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateSubDivFlag) - - flags.StringVar(&locodeGenerateAirportsPath, locodeGenerateAirportsFlag, "", "Path to OpenFlights airport database (csv)") - _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateAirportsFlag) - - flags.StringVar(&locodeGenerateCountriesPath, locodeGenerateCountriesFlag, "", "Path to OpenFlights country database (csv)") - _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateCountriesFlag) - - flags.StringVar(&locodeGenerateContinentsPath, locodeGenerateContinentsFlag, "", "Path to continent polygons (GeoJSON)") - _ = 
locodeGenerateCmd.MarkFlagRequired(locodeGenerateContinentsFlag) - - flags.StringVar(&locodeGenerateOutPath, locodeGenerateOutputFlag, "", "Target path for generated database") - _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateOutputFlag) -} diff --git a/cmd/frostfs-cli/modules/util/locode_info.go b/cmd/frostfs-cli/modules/util/locode_info.go deleted file mode 100644 index e89252dea..000000000 --- a/cmd/frostfs-cli/modules/util/locode_info.go +++ /dev/null @@ -1,56 +0,0 @@ -package util - -import ( - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" - locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb" - "github.com/spf13/cobra" -) - -const ( - locodeInfoDBFlag = "db" - locodeInfoCodeFlag = "locode" -) - -var ( - locodeInfoDBPath string - locodeInfoCode string - - locodeInfoCmd = &cobra.Command{ - Use: "info", - Short: "Print information about UN/LOCODE from FrostFS database", - Run: func(cmd *cobra.Command, _ []string) { - targetDB := locodebolt.New(locodebolt.Prm{ - Path: locodeInfoDBPath, - }, locodebolt.ReadOnly()) - - err := targetDB.Open() - commonCmd.ExitOnErr(cmd, "", err) - - defer targetDB.Close() - - record, err := locodedb.LocodeRecord(targetDB, locodeInfoCode) - commonCmd.ExitOnErr(cmd, "", err) - - cmd.Printf("Country: %s\n", record.CountryName()) - cmd.Printf("Location: %s\n", record.LocationName()) - cmd.Printf("Continent: %s\n", record.Continent()) - if subDivCode := record.SubDivCode(); subDivCode != "" { - cmd.Printf("Subdivision: [%s] %s\n", subDivCode, record.SubDivName()) - } - - geoPoint := record.GeoPoint() - cmd.Printf("Coordinates: %0.2f, %0.2f\n", geoPoint.Latitude(), geoPoint.Longitude()) - }, - } -) - -func initUtilLocodeInfoCmd() { - flags := locodeInfoCmd.Flags() - - flags.StringVar(&locodeInfoDBPath, locodeInfoDBFlag, "", "Path to FrostFS UN/LOCODE database") - _ = locodeInfoCmd.MarkFlagRequired(locodeInfoDBFlag) - - flags.StringVar(&locodeInfoCode, locodeInfoCodeFlag, "", "UN/LOCODE") - _ = locodeInfoCmd.MarkFlagRequired(locodeInfoCodeFlag) -} diff --git a/cmd/frostfs-cli/modules/util/root.go b/cmd/frostfs-cli/modules/util/root.go index 4a6b4403b..a909e6899 100644 --- a/cmd/frostfs-cli/modules/util/root.go +++ b/cmd/frostfs-cli/modules/util/root.go @@ -23,11 +23,9 @@ func init() { signCmd, convertCmd, keyerCmd, - locodeCmd, ) initSignCmd() initConvertCmd() initKeyerCmd() - initLocodeCmd() } diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 955195477..13a747ba6 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -1,12 +1,17 @@ package main import ( + "context" "os" "os/signal" + "strconv" "syscall" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "github.com/spf13/cast" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -36,14 +41,36 @@ func reloadConfig() error { } cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) audit.Store(cfg.GetBool("audit.enabled")) + var logPrm logger.Prm err = logPrm.SetLevelString(cfg.GetString("logger.level")) if err != nil { return err } - return logPrm.Reload() + err = logPrm.SetTags(loggerTags()) + if err != nil { + return err + } + logger.UpdateLevelForTags(logPrm) + + return nil } -func watchForSignal(cancel 
func()) { +func loggerTags() [][]string { + var res [][]string + for i := 0; ; i++ { + var item []string + index := strconv.FormatInt(int64(i), 10) + names := cast.ToString(cfg.Get("logger.tags." + index + ".names")) + if names == "" { + break + } + item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level"))) + res = append(res, item) + } + return res +} + +func watchForSignal(ctx context.Context, cancel func()) { ch := make(chan os.Signal, 1) signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) @@ -55,44 +82,49 @@ func watchForSignal(cancel func()) { // signals causing application to shut down should have priority over // reconfiguration signal case <-ch: - log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) cancel() - shutdown() - log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + shutdown(ctx) + log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-intErr: // internal application error - log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error())) + log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error())) cancel() - shutdown() + shutdown(ctx) return default: // block until any signal is received select { case <-ch: - log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) cancel() - shutdown() - log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + shutdown(ctx) + log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-intErr: // internal application error - log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error())) + log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error())) cancel() - shutdown() + shutdown(ctx) return case <-sighupCh: - log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) + log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) + if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { + log.Info(ctx, logs.FrostFSNodeSIGHUPSkip) + break + } err := reloadConfig() if err != nil { - log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) } - pprofCmp.reload() - metricsCmp.reload() - log.Info(logs.FrostFSIRReloadExtraWallets) + pprofCmp.reload(ctx) + metricsCmp.reload(ctx) + log.Info(ctx, logs.FrostFSIRReloadExtraWallets) err = innerRing.SetExtraWallets(cfg) if err != nil { - log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) } - log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) + innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) + log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } } } diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go index e703301ae..9b775252f 100644 --- a/cmd/frostfs-ir/defaults.go +++ b/cmd/frostfs-ir/defaults.go @@ -9,6 +9,7 @@ import ( func defaultConfiguration(cfg *viper.Viper) { cfg.SetDefault("logger.level", "info") cfg.SetDefault("logger.destination", "stdout") + cfg.SetDefault("logger.timestamp", false) setPprofDefaults(cfg) @@ -47,6 +48,8 @@ func defaultConfiguration(cfg *viper.Viper) { cfg.SetDefault("node.kludge_compatibility_mode", false)
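loggerTags above probes numeric keys because viper flattens a YAML list into index-addressed paths (logger.tags.0.names, logger.tags.0.level, and so on), and recent viper versions resolve such indexes into arrays. A self-contained sketch with a made-up pair of tag entries:

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/spf13/cast"
	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	err := v.ReadConfig(strings.NewReader(`
logger:
  tags:
    - names: "main,morph"
      level: debug
    - names: "policer"
      level: warn
`))
	if err != nil {
		panic(err)
	}
	// Probe indexes until the first entry without a "names" value; this is
	// why the tag list in the config must be contiguous.
	var res [][]string
	for i := 0; ; i++ {
		index := strconv.Itoa(i)
		names := cast.ToString(v.Get("logger.tags." + index + ".names"))
		if names == "" {
			break
		}
		res = append(res, []string{names, cast.ToString(v.Get("logger.tags." + index + ".level"))})
	}
	fmt.Println(res) // [[main,morph debug] [policer warn]]
}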
cfg.SetDefault("audit.enabled", false) + + setMultinetDefaults(cfg) } func setControlDefaults(cfg *viper.Viper) { @@ -130,3 +133,11 @@ func setMorphDefaults(cfg *viper.Viper) { cfg.SetDefault("morph.validators", []string{}) cfg.SetDefault("morph.switch_interval", 2*time.Minute) } + +func setMultinetDefaults(cfg *viper.Viper) { + cfg.SetDefault("multinet.enabled", false) + cfg.SetDefault("multinet.balancer", "") + cfg.SetDefault("multinet.restrict", false) + cfg.SetDefault("multinet.fallback_delay", "0s") + cfg.SetDefault("multinet.subnets", "") +} diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go index 2792c3548..dd70fc91c 100644 --- a/cmd/frostfs-ir/httpcomponent.go +++ b/cmd/frostfs-ir/httpcomponent.go @@ -1,6 +1,7 @@ package main import ( + "context" "net/http" "time" @@ -24,8 +25,8 @@ const ( shutdownTimeoutKeyPostfix = ".shutdown_timeout" ) -func (c *httpComponent) init() { - log.Info("init " + c.name) +func (c *httpComponent) init(ctx context.Context) { + log.Info(ctx, "init "+c.name) c.enabled = cfg.GetBool(c.name + enabledKeyPostfix) c.address = cfg.GetString(c.name + addressKeyPostfix) c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) @@ -39,14 +40,14 @@ func (c *httpComponent) init() { httputil.WithShutdownTimeout(c.shutdownDur), ) } else { - log.Info(c.name + " is disabled, skip") + log.Info(ctx, c.name+" is disabled, skip") c.srv = nil } } -func (c *httpComponent) start() { +func (c *httpComponent) start(ctx context.Context) { if c.srv != nil { - log.Info("start " + c.name) + log.Info(ctx, "start "+c.name) wg.Add(1) go func() { defer wg.Done() @@ -55,10 +56,10 @@ func (c *httpComponent) start() { } } -func (c *httpComponent) shutdown() error { +func (c *httpComponent) shutdown(ctx context.Context) error { if c.srv != nil { - log.Info("shutdown " + c.name) - return c.srv.Shutdown() + log.Info(ctx, "shutdown "+c.name) + return c.srv.Shutdown(ctx) } return nil } @@ -70,17 +71,17 @@ func (c *httpComponent) needReload() bool { return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur) } -func (c *httpComponent) reload() { - log.Info("reload " + c.name) +func (c *httpComponent) reload(ctx context.Context) { + log.Info(ctx, "reload "+c.name) if c.needReload() { - log.Info(c.name + " config updated") - if err := c.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + log.Info(ctx, c.name+" config updated") + if err := c.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.Error(err), ) } else { - c.init() - c.start() + c.init(ctx) + c.start(ctx) } } } diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index 5b852c37c..799feb784 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -13,6 +13,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -30,7 +31,6 @@ const ( var ( wg = new(sync.WaitGroup) intErr = make(chan error) // internal inner ring errors - logPrm = new(logger.Prm) innerRing *innerring.Server pprofCmp *pprofComponent metricsCmp *httpComponent @@ -69,6 +69,7 @@ func main() { metrics := irMetrics.NewInnerRingMetrics() + var logPrm logger.Prm err = logPrm.SetLevelString( cfg.GetString("logger.level"), ) @@ -78,50 +79,60 @@ func main() { 
) exitErr(err) logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook() + logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") + err = logPrm.SetTags(loggerTags()) + exitErr(err) + log, err = logger.NewLogger(logPrm) exitErr(err) + logger.UpdateLevelForTags(logPrm) + ctx, cancel := context.WithCancel(context.Background()) pprofCmp = newPprofComponent() - pprofCmp.init() + pprofCmp.init(ctx) metricsCmp = newMetricsComponent() - metricsCmp.init() + metricsCmp.init(ctx) audit.Store(cfg.GetBool("audit.enabled")) innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit) exitErr(err) - pprofCmp.start() - metricsCmp.start() + pprofCmp.start(ctx) + metricsCmp.start(ctx) // start inner ring err = innerRing.Start(ctx, intErr) exitErr(err) - log.Info(logs.CommonApplicationStarted, + log.Info(ctx, logs.CommonApplicationStarted, zap.String("version", misc.Version)) - watchForSignal(cancel) + watchForSignal(ctx, cancel) <-ctx.Done() // graceful shutdown - log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop) + log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop) wg.Wait() - log.Info(logs.FrostFSIRApplicationStopped) + log.Info(ctx, logs.FrostFSIRApplicationStopped) } -func shutdown() { - innerRing.Stop() - if err := metricsCmp.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), +func shutdown(ctx context.Context) { + innerRing.Stop(ctx) + if err := metricsCmp.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.Error(err), ) } - if err := pprofCmp.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + if err := pprofCmp.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.Error(err), ) } + + if err := sdnotify.ClearStatus(); err != nil { + log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) + } } diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go index ff5642008..2aebcde7f 100644 --- a/cmd/frostfs-ir/pprof.go +++ b/cmd/frostfs-ir/pprof.go @@ -1,6 +1,7 @@ package main import ( + "context" "runtime" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -28,8 +29,8 @@ func newPprofComponent() *pprofComponent { } } -func (c *pprofComponent) init() { - c.httpComponent.init() +func (c *pprofComponent) init(ctx context.Context) { + c.httpComponent.init(ctx) if c.enabled { c.blockRate = cfg.GetInt(pprofBlockRateKey) @@ -51,17 +52,17 @@ func (c *pprofComponent) needReload() bool { c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate) } -func (c *pprofComponent) reload() { - log.Info("reload " + c.name) +func (c *pprofComponent) reload(ctx context.Context) { + log.Info(ctx, "reload "+c.name) if c.needReload() { - log.Info(c.name + " config updated") - if err := c.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error())) + log.Info(ctx, c.name+" config updated") + if err := c.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.Error(err)) return } - c.init() - c.start() + c.init(ctx) + c.start(ctx) } } diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go index b1a6e3fd2..e7e2c0769 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go +++ b/cmd/frostfs-lens/internal/blobovnicza/inspect.go @@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { 
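Across main.go, httpcomponent.go, and pprof.go the IR now threads a context through every init/start/reload/shutdown entry point, so log records can carry request-scoped data and shutdown can be bounded by a deadline. A minimal analogue of that lifecycle (illustrative names, with stdlib slog standing in for the repo's zap-based logger):

package main

import (
	"context"
	"errors"
	"log/slog"
	"net/http"
	"time"
)

type component struct {
	name string
	srv  *http.Server
}

func (c *component) start(ctx context.Context) {
	slog.InfoContext(ctx, "start "+c.name)
	go func() {
		// ErrServerClosed is the expected result of a graceful shutdown.
		if err := c.srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
			slog.ErrorContext(ctx, "serve failed", "component", c.name, "error", err)
		}
	}()
}

func (c *component) shutdown(ctx context.Context) error {
	slog.InfoContext(ctx, "shutdown "+c.name)
	// Deriving from the caller's ctx bounds how long request draining may take.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	return c.srv.Shutdown(ctx)
}

The reload path above follows the same shape: shutdown(ctx) with the old settings, then init(ctx) and start(ctx) with the new ones.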
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) blz := openBlobovnicza(cmd) - defer blz.Close() + defer blz.Close(cmd.Context()) var prm blobovnicza.GetPrm prm.SetAddress(addr) diff --git a/cmd/frostfs-lens/internal/blobovnicza/list.go b/cmd/frostfs-lens/internal/blobovnicza/list.go index d327dbc41..d41a15bcf 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/list.go +++ b/cmd/frostfs-lens/internal/blobovnicza/list.go @@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) { } blz := openBlobovnicza(cmd) - defer blz.Close() + defer blz.Close(cmd.Context()) err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr) common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err)) diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go index 0a0cd955d..2819981d6 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/root.go +++ b/cmd/frostfs-lens/internal/blobovnicza/root.go @@ -19,7 +19,7 @@ var Root = &cobra.Command{ } func init() { - Root.AddCommand(listCMD, inspectCMD) + Root.AddCommand(listCMD, inspectCMD, tuiCMD) } func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza { @@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza { blobovnicza.WithPath(vPath), blobovnicza.WithReadOnly(true), ) - common.ExitOnErr(cmd, blz.Open()) + common.ExitOnErr(cmd, blz.Open(cmd.Context())) return blz } diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go new file mode 100644 index 000000000..4aa281616 --- /dev/null +++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go @@ -0,0 +1,68 @@ +package blobovnicza + +import ( + "context" + "fmt" + + common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" + schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/blobovnicza" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" + "github.com/rivo/tview" + "github.com/spf13/cobra" +) + +var tuiCMD = &cobra.Command{ + Use: "explore", + Short: "Blobovnicza exploration with a terminal UI", + Long: `Launch a terminal UI to explore blobovnicza and search for data. 
+ +Available search filters: +- cid CID +- oid OID +- addr CID/OID +`, + Run: tuiFunc, +} + +var initialPrompt string + +func init() { + common.AddComponentPathFlag(tuiCMD, &vPath) + + tuiCMD.Flags().StringVar( + &initialPrompt, + "filter", + "", + "Filter prompt to start with, format 'tag:value [+ tag:value]...'", + ) +} + +func tuiFunc(cmd *cobra.Command, _ []string) { + common.ExitOnErr(cmd, runTUI(cmd)) +} + +func runTUI(cmd *cobra.Command) error { + db, err := tui.OpenDB(vPath, false) + if err != nil { + return fmt.Errorf("couldn't open database: %w", err) + } + defer db.Close() + + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + app := tview.NewApplication() + ui := tui.NewUI(ctx, app, db, schema.BlobovniczaParser, nil) + + _ = ui.AddFilter("cid", tui.CIDParser, "CID") + _ = ui.AddFilter("oid", tui.OIDParser, "OID") + _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID") + + err = ui.WithPrompt(initialPrompt) + if err != nil { + return fmt.Errorf("invalid filter prompt: %w", err) + } + + app.SetRoot(ui, true).SetFocus(ui) + return app.Run() +} diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go index 9eb60f966..f436343c7 100644 --- a/cmd/frostfs-lens/internal/meta/inspect.go +++ b/cmd/frostfs-lens/internal/meta/inspect.go @@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) storageID := meta.StorageIDPrm{} storageID.SetAddress(addr) diff --git a/cmd/frostfs-lens/internal/meta/list-garbage.go b/cmd/frostfs-lens/internal/meta/list-garbage.go index 61b10ca1f..6b27a232f 100644 --- a/cmd/frostfs-lens/internal/meta/list-garbage.go +++ b/cmd/frostfs-lens/internal/meta/list-garbage.go @@ -19,7 +19,7 @@ func init() { func listGarbageFunc(cmd *cobra.Command, _ []string) { db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) var garbPrm meta.GarbageIterationPrm garbPrm.SetHandler( diff --git a/cmd/frostfs-lens/internal/meta/list-graveyard.go b/cmd/frostfs-lens/internal/meta/list-graveyard.go index 19a93691c..45642e74b 100644 --- a/cmd/frostfs-lens/internal/meta/list-graveyard.go +++ b/cmd/frostfs-lens/internal/meta/list-graveyard.go @@ -19,7 +19,7 @@ func init() { func listGraveyardFunc(cmd *cobra.Command, _ []string) { db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) var gravePrm meta.GraveyardIterationPrm gravePrm.SetHandler( diff --git a/cmd/frostfs-lens/internal/meta/root.go b/cmd/frostfs-lens/internal/meta/root.go index 6741abd0c..351d1ce80 100644 --- a/cmd/frostfs-lens/internal/meta/root.go +++ b/cmd/frostfs-lens/internal/meta/root.go @@ -32,6 +32,7 @@ func init() { inspectCMD, listGraveyardCMD, listGarbageCMD, + tuiCMD, ) } diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go new file mode 100644 index 000000000..7b0e25f3d --- /dev/null +++ b/cmd/frostfs-lens/internal/meta/tui.go @@ -0,0 +1,118 @@ +package meta + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + + common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" + schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" + "github.com/rivo/tview" + "github.com/spf13/cobra" + 
"go.etcd.io/bbolt" +) + +var tuiCMD = &cobra.Command{ + Use: "explore", + Short: "Metabase exploration with a terminal UI", + Long: `Launch a terminal UI to explore metabase and search for data. + +Available search filters: +- cid CID +- oid OID +- addr CID/OID +- attr key[/value] +`, + Run: tuiFunc, +} + +var initialPrompt string + +var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{ + 2: schema.MetabaseParserV2, + 3: schema.MetabaseParserV3, +} + +func init() { + common.AddComponentPathFlag(tuiCMD, &vPath) + + tuiCMD.Flags().StringVar( + &initialPrompt, + "filter", + "", + "Filter prompt to start with, format 'tag:value [+ tag:value]...'", + ) +} + +func tuiFunc(cmd *cobra.Command, _ []string) { + common.ExitOnErr(cmd, runTUI(cmd)) +} + +func runTUI(cmd *cobra.Command) error { + db, err := tui.OpenDB(vPath, false) + if err != nil { + return fmt.Errorf("couldn't open database: %w", err) + } + defer db.Close() + + schemaVersion, hasVersion := lookupSchemaVersion(cmd, db) + if !hasVersion { + return errors.New("couldn't detect schema version") + } + + metabaseParser, ok := parserPerSchemaVersion[schemaVersion] + if !ok { + return fmt.Errorf("unknown schema version %d", schemaVersion) + } + + // Need if app was stopped with Ctrl-C. + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + app := tview.NewApplication() + ui := tui.NewUI(ctx, app, db, metabaseParser, nil) + + _ = ui.AddFilter("cid", tui.CIDParser, "CID") + _ = ui.AddFilter("oid", tui.OIDParser, "OID") + _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID") + _ = ui.AddCompositeFilter("attr", tui.AttributeParser, "key[/value]") + + err = ui.WithPrompt(initialPrompt) + if err != nil { + return fmt.Errorf("invalid filter prompt: %w", err) + } + + app.SetRoot(ui, true).SetFocus(ui) + return app.Run() +} + +var ( + shardInfoBucket = []byte{5} + versionRecord = []byte("version") +) + +func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) { + err := db.View(func(tx *bbolt.Tx) error { + bkt := tx.Bucket(shardInfoBucket) + if bkt == nil { + return nil + } + rec := bkt.Get(versionRecord) + if rec == nil { + return nil + } + + version = binary.LittleEndian.Uint64(rec) + ok = true + + return nil + }) + if err != nil { + common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err)) + } + + return +} diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go b/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go new file mode 100644 index 000000000..02b6cf414 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go @@ -0,0 +1,96 @@ +package blobovnicza + +import ( + "encoding/binary" + "errors" + "fmt" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/mr-tron/base58" +) + +var BlobovniczaParser = common.WithFallback( + common.Any( + MetaBucketParser, + BucketParser, + ), + common.RawParser.ToFallbackParser(), +) + +func MetaBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, errors.New("not a bucket") + } + + if string(key) != "META" { + return nil, nil, errors.New("invalid bucket name") + } + + return &MetaBucket{}, MetaRecordParser, nil +} + +func MetaRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + var r MetaRecord + + if len(key) == 0 { + return nil, 
nil, errors.New("invalid key") + } + + r.label = string(key) + r.count = binary.LittleEndian.Uint64(value) + + return &r, nil, nil +} + +func BucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, errors.New("not a bucket") + } + + size, n := binary.Varint(key) + if n <= 0 { + return nil, nil, errors.New("invalid size") + } + + return &Bucket{size: size}, RecordParser, nil +} + +func RecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + parts := strings.Split(string(key), "/") + + if len(parts) != 2 { + return nil, nil, errors.New("invalid key, expected address string /") + } + + cnrRaw, err := base58.Decode(parts[0]) + if err != nil { + return nil, nil, errors.New("can't decode CID string") + } + objRaw, err := base58.Decode(parts[1]) + if err != nil { + return nil, nil, errors.New("can't decode OID string") + } + + cnr := cid.ID{} + if err := cnr.Decode(cnrRaw); err != nil { + return nil, nil, fmt.Errorf("can't decode CID: %w", err) + } + obj := oid.ID{} + if err := obj.Decode(objRaw); err != nil { + return nil, nil, fmt.Errorf("can't decode OID: %w", err) + } + + var r Record + + r.addr.SetContainer(cnr) + r.addr.SetObject(obj) + + if err := r.object.Unmarshal(value); err != nil { + return nil, nil, errors.New("can't unmarshal object") + } + + return &r, nil, nil +} diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/types.go b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go new file mode 100644 index 000000000..c7ed08cdd --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go @@ -0,0 +1,101 @@ +package blobovnicza + +import ( + "fmt" + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/davecgh/go-spew/spew" + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +type ( + MetaBucket struct{} + + MetaRecord struct { + label string + count uint64 + } + + Bucket struct { + size int64 + } + + Record struct { + addr oid.Address + object objectSDK.Object + } +) + +func (b *MetaBucket) String() string { + return common.FormatSimple("META", tcell.ColorLime) +} + +func (b *MetaBucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (b *MetaBucket) Filter(string, any) common.FilterResult { + return common.No +} + +func (r *MetaRecord) String() string { + return fmt.Sprintf("%-11s %c %d", r.label, tview.Borders.Vertical, r.count) +} + +func (r *MetaRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *MetaRecord) Filter(string, any) common.FilterResult { + return common.No +} + +func (b *Bucket) String() string { + return common.FormatSimple(strconv.FormatInt(b.size, 10), tcell.ColorLime) +} + +func (b *Bucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (b *Bucket) Filter(typ string, _ any) common.FilterResult { + switch typ { + case "cid": + return common.Maybe + case "oid": + return common.Maybe + default: + return common.No + } +} + +func (r *Record) String() string { + return fmt.Sprintf( + "CID %s OID %s %c Object {...}", + common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua), + tview.Borders.Vertical, + ) +} + +func (r *Record) DetailedString() string { + return 
spew.Sdump(*r) +} + +func (r *Record) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No) + default: + return common.No + } +} diff --git a/cmd/frostfs-lens/internal/schema/common/format.go b/cmd/frostfs-lens/internal/schema/common/format.go new file mode 100644 index 000000000..4ed7e96f2 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/common/format.go @@ -0,0 +1,43 @@ +package common + +import ( + "fmt" + "strconv" + + "github.com/gdamore/tcell/v2" +) + +type FormatOptions struct { + Color tcell.Color + + Bold, + Italic, + Underline, + StrikeThrough bool +} + +func Format(s string, opts FormatOptions) string { + var boldTag, italicTag, underlineTag, strikeThroughTag string + + switch { + case opts.Bold: + boldTag = "b" + case opts.Italic: + italicTag = "i" + case opts.Underline: + underlineTag = "u" + case opts.StrikeThrough: + strikeThroughTag = "s" + } + + attrs := fmt.Sprintf( + "%s%s%s%s", boldTag, italicTag, underlineTag, strikeThroughTag, + ) + color := strconv.FormatInt(int64(opts.Color.Hex()), 16) + + return fmt.Sprintf("[#%06s::%s]%s[-::-]", color, attrs, s) +} + +func FormatSimple(s string, c tcell.Color) string { + return Format(s, FormatOptions{Color: c}) +} diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go new file mode 100644 index 000000000..55051554c --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/common/raw.go @@ -0,0 +1,31 @@ +package common + +import ( + "github.com/davecgh/go-spew/spew" + "github.com/gdamore/tcell/v2" + "github.com/mr-tron/base58" +) + +type RawEntry struct { + // key and value used for record dump. 
+ // nolint:unused + key, value []byte +} + +var RawParser Parser = rawParser + +func rawParser(key, value []byte) (SchemaEntry, Parser, error) { + return &RawEntry{key: key, value: value}, rawParser, nil +} + +func (r *RawEntry) String() string { + return FormatSimple(base58.Encode(r.key), tcell.ColorRed) +} + +func (r *RawEntry) DetailedString() string { + return spew.Sdump(r) +} + +func (r *RawEntry) Filter(string, any) FilterResult { + return No +} diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go new file mode 100644 index 000000000..077a68785 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/common/schema.go @@ -0,0 +1,79 @@ +package common + +import ( + "errors" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" +) + +type FilterResult byte + +const ( + No FilterResult = iota + Maybe + Yes +) + +func IfThenElse(condition bool, onSuccess, onFailure FilterResult) FilterResult { + var res FilterResult + if condition { + res = onSuccess + } else { + res = onFailure + } + return res +} + +type SchemaEntry interface { + String() string + DetailedString() string + Filter(typ string, val any) FilterResult +} + +type ( + Parser func(key, value []byte) (SchemaEntry, Parser, error) + FallbackParser func(key, value []byte) (SchemaEntry, Parser) +) + +func Any(parsers ...Parser) Parser { + return func(key, value []byte) (SchemaEntry, Parser, error) { + var errs error + for _, parser := range parsers { + ret, next, err := parser(key, value) + if err == nil { + return ret, next, nil + } + errs = errors.Join(errs, err) + } + return nil, nil, fmt.Errorf("no parser succeeded: %w", errs) + } +} + +func WithFallback(parser Parser, fallback FallbackParser) Parser { + if parser == nil { + return fallback.ToParser() + } + return func(key, value []byte) (SchemaEntry, Parser, error) { + entry, next, err := parser(key, value) + if err == nil { + return entry, WithFallback(next, fallback), nil + } + return fallback.ToParser()(key, value) + } +} + +func (fp FallbackParser) ToParser() Parser { + return func(key, value []byte) (SchemaEntry, Parser, error) { + entry, next := fp(key, value) + return entry, next, nil + } +} + +func (p Parser) ToFallbackParser() FallbackParser { + return func(key, value []byte) (SchemaEntry, Parser) { + entry, next, err := p(key, value) + assert.NoError(err, "couldn't use that parser as a fallback parser") + return entry, next + } +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go new file mode 100644 index 000000000..6a08a723e --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go @@ -0,0 +1,29 @@ +package buckets + +import ( + "github.com/davecgh/go-spew/spew" +) + +func (b *PrefixBucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (b *PrefixContainerBucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (b *UserBucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (b *ContainerBucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (b *UserAttributeKeyBucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (b *UserAttributeValueBucket) DetailedString() string { + return spew.Sdump(*b) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go new file mode 100644 index 000000000..891c4004f --- /dev/null +++ 
b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go @@ -0,0 +1,81 @@ +package buckets + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" +) + +func (b *PrefixBucket) Filter(typ string, _ any) common.FilterResult { + switch typ { + case "cid": + return b.resolvers.cidResolver(false) + case "oid": + return b.resolvers.oidResolver(false) + default: + return common.No + } +} + +func (b *PrefixContainerBucket) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return b.resolvers.cidResolver(b.id.Equals(id)) + case "oid": + return b.resolvers.oidResolver(false) + default: + return common.No + } +} + +func (b *UserBucket) Filter(typ string, _ any) common.FilterResult { + switch typ { + case "cid": + return b.resolvers.cidResolver(false) + case "oid": + return b.resolvers.oidResolver(false) + default: + return common.No + } +} + +func (b *ContainerBucket) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return b.resolvers.cidResolver(b.id.Equals(id)) + case "oid": + return b.resolvers.oidResolver(false) + default: + return common.No + } +} + +func (b *UserAttributeKeyBucket) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(b.id.Equals(id), common.Yes, common.No) + case "oid": + return common.Maybe + case "key": + key := val.(string) + return common.IfThenElse(b.key == key, common.Yes, common.No) + case "value": + return common.Maybe + default: + return common.No + } +} + +func (b *UserAttributeValueBucket) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + return common.Maybe + case "value": + value := val.(string) + return common.IfThenElse(b.value == value, common.Yes, common.No) + default: + return common.No + } +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go new file mode 100644 index 000000000..4e6bbf08a --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go @@ -0,0 +1,126 @@ +package buckets + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/records" +) + +var ( + GraveyardParser = NewPrefixBucketParser(Graveyard, records.GraveyardRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: LenientResolver, + }) + + GarbageParser = NewPrefixBucketParser(Garbage, records.GarbageRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: LenientResolver, + }) + + ContainerVolumeParser = NewPrefixBucketParser(ContainerVolume, records.ContainerVolumeRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: StrictResolver, + }) + + LockedParser = NewPrefixBucketParser( + Locked, + NewContainerBucketParser( + records.LockedRecordParser, + Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }, + ), + Resolvers{ + cidResolver: LenientResolver, + oidResolver: LenientResolver, + }, + ) + + ShardInfoParser = NewPrefixBucketParser(ShardInfo, records.ShardInfoRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: StrictResolver, + }) + + PrimaryParser = NewPrefixContainerBucketParser(Primary, records.ObjectRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) + + LockersParser = 
NewPrefixContainerBucketParser(Lockers, records.ObjectRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) + + TombstoneParser = NewPrefixContainerBucketParser(Tombstone, records.ObjectRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) + + SmallParser = NewPrefixContainerBucketParser(Small, records.SmallRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) + + RootParser = NewPrefixContainerBucketParser(Root, records.RootRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) + + OwnerParser = NewPrefixContainerBucketParser( + Owner, + NewUserBucketParser( + records.OwnerRecordParser, + Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }, + ), + Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }, + ) + + UserAttributeParserV2 = NewUserAttributeKeyBucketParser( + NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), + ) + + UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys( + NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), + []string{"FilePath", "S3-Access-Box-CRDT-Name"}, + ) + + PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: StrictResolver, + }) + + ParentParser = NewPrefixContainerBucketParser(Parent, records.ParentRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) + + SplitParser = NewPrefixContainerBucketParser(Split, records.SplitRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: StrictResolver, + }) + + ContainerCountersParser = NewPrefixBucketParser(ContainerCounters, records.ContainerCountersRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: StrictResolver, + }) + + ECInfoParser = NewPrefixContainerBucketParser(ECInfo, records.ECInfoRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) + + ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: LenientResolver, + }) + + ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) +) diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go new file mode 100644 index 000000000..42a24c594 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go @@ -0,0 +1,57 @@ +package buckets + +type Prefix byte + +const ( + Graveyard Prefix = iota + Garbage + ToMoveIt + ContainerVolume + Locked + ShardInfo + Primary + Lockers + _ + Tombstone + Small + Root + Owner + UserAttribute + PayloadHash + Parent + Split + ContainerCounters + ECInfo + ExpirationEpochToObject + ObjectToExpirationEpoch +) + +var x = map[Prefix]string{ + Graveyard: "Graveyard", + Garbage: "Garbage", + ToMoveIt: "To Move It", + ContainerVolume: "Container Volume", + Locked: "Locked", + ShardInfo: "Shard Info", + Primary: "Primary", + Lockers: "Lockers", + Tombstone: "Tombstone", + Small: "Small", + Root: "Root", + Owner: "Owner", + UserAttribute: "User Attribute", + PayloadHash: "Payload Hash", + Parent: "Parent", + 
Split: "Split", + ContainerCounters: "Container Counters", + ECInfo: "EC Info", + ExpirationEpochToObject: "Exp. Epoch to Object", + ObjectToExpirationEpoch: "Object to Exp. Epoch", +} + +func (p Prefix) String() string { + if s, ok := x[p]; ok { + return s + } + return "Unknown Prefix" +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go new file mode 100644 index 000000000..62d126f88 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go @@ -0,0 +1,48 @@ +package buckets + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "github.com/gdamore/tcell/v2" +) + +func (b *PrefixBucket) String() string { + return common.FormatSimple( + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, + ) +} + +func (b *PrefixContainerBucket) String() string { + return fmt.Sprintf( + "%s CID %s", + common.FormatSimple( + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, + ), + common.FormatSimple(b.id.String(), tcell.ColorAqua), + ) +} + +func (b *UserBucket) String() string { + return "UID " + common.FormatSimple(b.id.String(), tcell.ColorAqua) +} + +func (b *ContainerBucket) String() string { + return "CID " + common.FormatSimple(b.id.String(), tcell.ColorAqua) +} + +func (b *UserAttributeKeyBucket) String() string { + return fmt.Sprintf("%s CID %s ATTR-KEY %s", + common.FormatSimple( + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, + ), + common.FormatSimple( + fmt.Sprintf("%-44s", b.id), tcell.ColorAqua, + ), + common.FormatSimple(b.key, tcell.ColorAqua), + ) +} + +func (b *UserAttributeValueBucket) String() string { + return "ATTR-VALUE " + common.FormatSimple(b.value, tcell.ColorAqua) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go new file mode 100644 index 000000000..7355c3d9e --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go @@ -0,0 +1,177 @@ +package buckets + +import ( + "errors" + "slices" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/mr-tron/base58" +) + +type ( + PrefixBucket struct { + prefix Prefix + resolvers Resolvers + } + + PrefixContainerBucket struct { + prefix Prefix + id cid.ID + resolvers Resolvers + } + + ContainerBucket struct { + id cid.ID + resolvers Resolvers + } + + UserBucket struct { + id user.ID + resolvers Resolvers + } + + UserAttributeKeyBucket struct { + prefix Prefix + id cid.ID + key string + } + + UserAttributeValueBucket struct { + value string + } +) + +type ( + FilterResolver = func(result bool) common.FilterResult + + Resolvers struct { + cidResolver FilterResolver + oidResolver FilterResolver + } +) + +var ( + StrictResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.No) } + LenientResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.Maybe) } +) + +var ( + ErrNotBucket = errors.New("not a bucket") + ErrInvalidKeyLength = errors.New("invalid key length") + ErrInvalidValueLength = errors.New("invalid value length") + ErrInvalidPrefix = errors.New("invalid prefix") + ErrUnexpectedAttributeKey = errors.New("unexpected attribute key") +) + +func NewPrefixBucketParser(prefix Prefix, next 
common.Parser, resolvers Resolvers) common.Parser { + return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, ErrNotBucket + } + if len(key) != 1 { + return nil, nil, ErrInvalidKeyLength + } + var b PrefixBucket + if b.prefix = Prefix(key[0]); b.prefix != prefix { + return nil, nil, ErrInvalidPrefix + } + b.resolvers = resolvers + return &b, next, nil + } +} + +func NewPrefixContainerBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { + return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, ErrNotBucket + } + if len(key) != 33 { + return nil, nil, ErrInvalidKeyLength + } + var b PrefixContainerBucket + if b.prefix = Prefix(key[0]); b.prefix != prefix { + return nil, nil, ErrInvalidPrefix + } + if err := b.id.Decode(key[1:]); err != nil { + return nil, nil, err + } + b.resolvers = resolvers + return &b, next, nil + } +} + +func NewUserBucketParser(next common.Parser, resolvers Resolvers) common.Parser { + return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, ErrNotBucket + } + var b UserBucket + if err := b.id.DecodeString(base58.Encode(key)); err != nil { + return nil, nil, err + } + b.resolvers = resolvers + return &b, next, nil + } +} + +func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Parser { + return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, ErrNotBucket + } + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + var b ContainerBucket + if err := b.id.Decode(key); err != nil { + return nil, nil, err + } + b.resolvers = resolvers + return &b, next, nil + } +} + +func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { + return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil) +} + +func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser { + return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, ErrNotBucket + } + if len(key) < 34 { + return nil, nil, ErrInvalidKeyLength + } + var b UserAttributeKeyBucket + if b.prefix = Prefix(key[0]); b.prefix != UserAttribute { + return nil, nil, ErrInvalidPrefix + } + if err := b.id.Decode(key[1:33]); err != nil { + return nil, nil, err + } + b.key = string(key[33:]) + + if len(keys) != 0 && !slices.Contains(keys, b.key) { + return nil, nil, ErrUnexpectedAttributeKey + } + + return &b, next, nil + } +} + +func NewUserAttributeValueBucketParser(next common.Parser) common.Parser { + return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, ErrNotBucket + } + if len(key) == 0 { + return nil, nil, ErrInvalidKeyLength + } + var b UserAttributeValueBucket + b.value = string(key) + return &b, next, nil + } +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go new file mode 100644 index 000000000..4cc9e8765 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go @@ -0,0 +1,52 @@ +package metabase + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets" +) + +var MetabaseParserV3 = common.WithFallback( + common.Any( + buckets.GraveyardParser, 
+ buckets.GarbageParser, + buckets.ContainerVolumeParser, + buckets.LockedParser, + buckets.ShardInfoParser, + buckets.PrimaryParser, + buckets.LockersParser, + buckets.TombstoneParser, + buckets.SmallParser, + buckets.RootParser, + buckets.UserAttributeParserV3, + buckets.ParentParser, + buckets.SplitParser, + buckets.ContainerCountersParser, + buckets.ECInfoParser, + buckets.ExpirationEpochToObjectParser, + buckets.ObjectToExpirationEpochParser, + ), + common.RawParser.ToFallbackParser(), +) + +var MetabaseParserV2 = common.WithFallback( + common.Any( + buckets.GraveyardParser, + buckets.GarbageParser, + buckets.ContainerVolumeParser, + buckets.LockedParser, + buckets.ShardInfoParser, + buckets.PrimaryParser, + buckets.LockersParser, + buckets.TombstoneParser, + buckets.SmallParser, + buckets.RootParser, + buckets.OwnerParser, + buckets.UserAttributeParserV2, + buckets.PayloadHashParser, + buckets.ParentParser, + buckets.SplitParser, + buckets.ContainerCountersParser, + buckets.ECInfoParser, + ), + common.RawParser.ToFallbackParser(), +) diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go new file mode 100644 index 000000000..477c4fc9d --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go @@ -0,0 +1,73 @@ +package records + +import ( + "github.com/davecgh/go-spew/spew" +) + +func (r *GraveyardRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *GarbageRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ContainerVolumeRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *LockedRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ShardInfoRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ObjectRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *SmallRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *RootRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *OwnerRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *UserAttributeRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *PayloadHashRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ParentRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *SplitRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ContainerCountersRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ECInfoRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ExpirationEpochToObjectRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ObjectToExpirationEpochRecord) DetailedString() string { + return spew.Sdump(*r) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go new file mode 100644 index 000000000..e038911d7 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go @@ -0,0 +1,168 @@ +package records + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (r *GraveyardRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.object.Container().Equals(id), common.Yes, 
common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.object.Object().Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *GarbageRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ContainerVolumeRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ShardInfoRecord) Filter(string, any) common.FilterResult { + return common.No +} + +func (r *LockedRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ObjectRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *SmallRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *RootRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *OwnerRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *UserAttributeRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *PayloadHashRecord) Filter(string, any) common.FilterResult { + return common.No +} + +func (r *ParentRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.parent.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *SplitRecord) Filter(string, any) common.FilterResult { + return common.No +} + +func (r *ContainerCountersRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) 
common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go new file mode 100644 index 000000000..5d846cb75 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go @@ -0,0 +1,293 @@ +package records + +import ( + "encoding/binary" + "errors" + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +var ( + ErrInvalidKeyLength = errors.New("invalid key length") + ErrInvalidValueLength = errors.New("invalid value length") + ErrInvalidPrefix = errors.New("invalid prefix") +) + +func GraveyardRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 64 { + return nil, nil, ErrInvalidKeyLength + } + if len(value) != 64 { + return nil, nil, ErrInvalidValueLength + } + var ( + cnr cid.ID + obj oid.ID + r GraveyardRecord + ) + + _ = cnr.Decode(key[:32]) + _ = obj.Decode(key[32:]) + + r.object.SetContainer(cnr) + r.object.SetObject(obj) + + _ = cnr.Decode(value[:32]) + _ = obj.Decode(value[32:]) + + r.tombstone.SetContainer(cnr) + r.tombstone.SetObject(obj) + + return &r, nil, nil +} + +func GarbageRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 64 { + return nil, nil, ErrInvalidKeyLength + } + var ( + cnr cid.ID + obj oid.ID + r GarbageRecord + ) + + _ = cnr.Decode(key[:32]) + _ = obj.Decode(key[32:]) + + r.addr.SetContainer(cnr) + r.addr.SetObject(obj) + + return &r, nil, nil +} + +func ContainerVolumeRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + if len(value) != 8 { + return nil, nil, ErrInvalidValueLength + } + var r ContainerVolumeRecord + + _ = r.id.Decode(key) + r.volume = binary.LittleEndian.Uint64(value) + + return &r, nil, nil +} + +func LockedRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + var ( + r LockedRecord + err error + ) + + if err := r.id.Decode(key); err != nil { + return nil, nil, err + } + if r.ids, err = DecodeOIDs(value); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func ShardInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) == 0 { + return nil, nil, ErrInvalidKeyLength + } + + var r ShardInfoRecord + if string(key) == "id" { + r.label = string(key) + r.value = shard.ID(value).String() + + return &r, nil, nil + } + + if len(value) != 8 { + return nil, nil, ErrInvalidValueLength + } + r.label = string(key) + r.value = strconv.FormatUint(binary.LittleEndian.Uint64(value), 10) + + return &r, nil, nil +} + +func ObjectRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + var r ObjectRecord + + _ = r.id.Decode(key) + if err := r.object.Unmarshal(value); err != nil { + return nil, nil, err + } + + return &r, nil, nil +} + +func SmallRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + var r SmallRecord + if 
err := r.id.Decode(key); err != nil { + return nil, nil, err + } + if len(value) != 0 { + x := string(value) + r.storageID = &x + } + return &r, nil, nil +} + +func RootRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + var r RootRecord + if err := r.id.Decode(key); err != nil { + return nil, nil, err + } + if len(value) == 0 { + return &r, nil, nil + } + r.info = &objectSDK.SplitInfo{} + if err := r.info.Unmarshal(value); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func OwnerRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { + var r OwnerRecord + if err := r.id.Decode(key); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func UserAttributeRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { + var r UserAttributeRecord + if err := r.id.Decode(key); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func PayloadHashRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + var ( + err error + r PayloadHashRecord + ) + + r.checksum.SetSHA256([32]byte(key)) + if r.ids, err = DecodeOIDs(value); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func ParentRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + var ( + r ParentRecord + err error + ) + if err = r.parent.Decode(key); err != nil { + return nil, nil, err + } + if r.ids, err = DecodeOIDs(value); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func SplitRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + var ( + err error + r SplitRecord + ) + if err = r.id.UnmarshalBinary(key); err != nil { + return nil, nil, err + } + if r.ids, err = DecodeOIDs(value); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func ContainerCountersRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(value) != 24 { + return nil, nil, ErrInvalidValueLength + } + + var r ContainerCountersRecord + if err := r.id.Decode(key); err != nil { + return nil, nil, err + } + + r.logical = binary.LittleEndian.Uint64(value[:8]) + r.physical = binary.LittleEndian.Uint64(value[8:16]) + r.user = binary.LittleEndian.Uint64(value[16:24]) + + return &r, nil, nil +} + +func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + var ( + r ECInfoRecord + err error + ) + + if err := r.id.Decode(key); err != nil { + return nil, nil, err + } + if r.ids, err = DecodeOIDs(value); err != nil { + return nil, nil, err + } + return &r, nil, nil +} + +func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 72 { + return nil, nil, ErrInvalidKeyLength + } + + var ( + r ExpirationEpochToObjectRecord + err error + ) + + r.epoch = binary.BigEndian.Uint64(key[:8]) + if err = r.cnt.Decode(key[8:40]); err != nil { + return nil, nil, err + } + if err = r.obj.Decode(key[40:]); err != nil { + return nil, nil, err + } + + return &r, nil, nil +} + +func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + if len(value) != 8 { + return nil, nil, ErrInvalidValueLength + } + + var ( + r ObjectToExpirationEpochRecord + err error + ) + + if err = r.obj.Decode(key); err != nil { + return nil, nil, err + } + r.epoch = binary.LittleEndian.Uint64(value) + + 
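// Note the asymmetry with ExpirationEpochToObjectRecordParser above: there the
// epoch forms the first 8 bytes of the key and is big-endian, so bbolt's
// lexicographic key order coincides with numeric epoch order; here the epoch is
// stored in the value and is little-endian, like the other fixed-width values
// in this file (container volume, shard info, container counters).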
return &r, nil, nil +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go new file mode 100644 index 000000000..f71244625 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go @@ -0,0 +1,155 @@ +package records + +import ( + "fmt" + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +func (r *GraveyardRecord) String() string { + return fmt.Sprintf( + "Object CID %s OID %s %c Tombstone CID %s OID %s", + common.FormatSimple(fmt.Sprintf("%-44s", r.object.Container()), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.object.Object()), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Container()), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Object()), tcell.ColorAqua), + ) +} + +func (r *GarbageRecord) String() string { + return fmt.Sprintf( + "CID %-44s OID %-44s", + common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua), + ) +} + +func (r *ContainerVolumeRecord) String() string { + return fmt.Sprintf( + "CID %-44s %c %d", + common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), + tview.Borders.Vertical, + r.volume, + ) +} + +func (r *LockedRecord) String() string { + return fmt.Sprintf( + "Object OID %s %c Lockers [%d]OID {...}", + common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), + tview.Borders.Vertical, + len(r.ids), + ) +} + +func (r *ShardInfoRecord) String() string { + return fmt.Sprintf("%-13s %c %s", r.label, tview.Borders.Vertical, r.value) +} + +func (r *ObjectRecord) String() string { + return fmt.Sprintf( + "OID %s %c Object {...}", + common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), + tview.Borders.Vertical, + ) +} + +func (r *SmallRecord) String() string { + s := fmt.Sprintf( + "OID %s %c", + common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), + tview.Borders.Vertical, + ) + if r.storageID != nil { + s = fmt.Sprintf("%s %s", s, *r.storageID) + } + return s +} + +func (r *RootRecord) String() string { + s := fmt.Sprintf( + "Root OID %s %c", + common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), + tview.Borders.Vertical, + ) + if r.info != nil { + s += " Split info {...}" + } + return s +} + +func (r *OwnerRecord) String() string { + return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua) +} + +func (r *UserAttributeRecord) String() string { + return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua) +} + +func (r *PayloadHashRecord) String() string { + return fmt.Sprintf( + "Checksum %s %c [%d]OID {...}", + common.FormatSimple(r.checksum.String(), tcell.ColorAqua), + tview.Borders.Vertical, + len(r.ids), + ) +} + +func (r *ParentRecord) String() string { + return fmt.Sprintf( + "Parent OID %s %c [%d]OID {...}", + common.FormatSimple(fmt.Sprintf("%-44s", r.parent), tcell.ColorAqua), + tview.Borders.Vertical, + len(r.ids), + ) +} + +func (r *SplitRecord) String() string { + return fmt.Sprintf( + "Split ID %s %c [%d]OID {...}", + common.FormatSimple(r.id.String(), tcell.ColorAqua), + tview.Borders.Vertical, + len(r.ids), + ) +} + +func (r *ContainerCountersRecord) String() string { + return fmt.Sprintf( + "CID %s %c logical %d, physical %d, user %d", + 
common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), + tview.Borders.Vertical, + r.logical, r.physical, r.user, + ) +} + +func (r *ECInfoRecord) String() string { + return fmt.Sprintf( + "OID %s %c [%d]OID {...}", + common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), + tview.Borders.Vertical, + len(r.ids), + ) +} + +func (r *ExpirationEpochToObjectRecord) String() string { + return fmt.Sprintf( + "exp. epoch %s %c CID %s OID %s", + common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + ) +} + +func (r *ObjectToExpirationEpochRecord) String() string { + return fmt.Sprintf( + "OID %s %c exp. epoch %s", + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua), + ) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go new file mode 100644 index 000000000..0809cad1a --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go @@ -0,0 +1,93 @@ +package records + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/google/uuid" +) + +type ( + GraveyardRecord struct { + object, tombstone oid.Address + } + + GarbageRecord struct { + addr oid.Address + } + + ContainerVolumeRecord struct { + id cid.ID + volume uint64 + } + + LockedRecord struct { + id oid.ID + ids []oid.ID + } + + ShardInfoRecord struct { + label string + value string + } + + ObjectRecord struct { + id oid.ID + object objectSDK.Object + } + + SmallRecord struct { + id oid.ID + storageID *string // optional + } + + RootRecord struct { + id oid.ID + info *objectSDK.SplitInfo // optional + } + + OwnerRecord struct { + id oid.ID + } + + UserAttributeRecord struct { + id oid.ID + } + + PayloadHashRecord struct { + checksum checksum.Checksum + ids []oid.ID + } + + ParentRecord struct { + parent oid.ID + ids []oid.ID + } + + SplitRecord struct { + id uuid.UUID + ids []oid.ID + } + + ContainerCountersRecord struct { + id cid.ID + logical, physical, user uint64 + } + + ECInfoRecord struct { + id oid.ID + ids []oid.ID + } + + ExpirationEpochToObjectRecord struct { + epoch uint64 + cnt cid.ID + obj oid.ID + } + + ObjectToExpirationEpochRecord struct { + obj oid.ID + epoch uint64 + } +) diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go new file mode 100644 index 000000000..d15d69146 --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/metabase/records/util.go @@ -0,0 +1,20 @@ +package records + +import ( + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/nspcc-dev/neo-go/pkg/io" +) + +func DecodeOIDs(data []byte) ([]oid.ID, error) { + r := io.NewBinReaderFromBuf(data) + + size := r.ReadVarUint() + oids := make([]oid.ID, size) + + for i := range size { + if err := oids[i].Decode(r.ReadVarBytes()); err != nil { + return nil, err + } + } + return oids, nil +} diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go new file mode 100644 index 
000000000..3bfe2608b --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go @@ -0,0 +1,63 @@ +package writecache + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/mr-tron/base58" +) + +var WritecacheParser = common.WithFallback( + DefaultBucketParser, + common.RawParser.ToFallbackParser(), +) + +func DefaultBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if value != nil { + return nil, nil, errors.New("not a bucket") + } + if !bytes.Equal(key, []byte{0}) { + return nil, nil, errors.New("invalid key") + } + return &DefaultBucket{}, DefaultRecordParser, nil +} + +func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + parts := strings.Split(string(key), "/") + + if len(parts) != 2 { + return nil, nil, errors.New("invalid key, expected address string <CID>/<OID>") + } + + cnrRaw, err := base58.Decode(parts[0]) + if err != nil { + return nil, nil, errors.New("can't decode CID string") + } + objRaw, err := base58.Decode(parts[1]) + if err != nil { + return nil, nil, errors.New("can't decode OID string") + } + + cnr := cid.ID{} + if err := cnr.Decode(cnrRaw); err != nil { + return nil, nil, fmt.Errorf("can't decode CID: %w", err) + } + obj := oid.ID{} + if err := obj.Decode(objRaw); err != nil { + return nil, nil, fmt.Errorf("can't decode OID: %w", err) + } + + var r DefaultRecord + + r.addr.SetContainer(cnr) + r.addr.SetObject(obj) + + r.data = value + + return &r, nil, nil +} diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go new file mode 100644 index 000000000..11e6f3fcd --- /dev/null +++ b/cmd/frostfs-lens/internal/schema/writecache/types.go @@ -0,0 +1,68 @@ +package writecache + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/davecgh/go-spew/spew" + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +type ( + DefaultBucket struct{} + + DefaultRecord struct { + addr oid.Address + // data is kept for the detailed record dump. 
+ // nolint:unused + data []byte + } +) + +func (b *DefaultBucket) String() string { + return common.FormatSimple("0 Default", tcell.ColorLime) +} + +func (r *DefaultRecord) String() string { + return fmt.Sprintf( + "CID %s OID %s %c Data {...}", + common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua), + tview.Borders.Vertical, + ) +} + +func (b *DefaultBucket) DetailedString() string { + return spew.Sdump(*b) +} + +func (r *DefaultRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (b *DefaultBucket) Filter(typ string, _ any) common.FilterResult { + switch typ { + case "cid": + return common.Maybe + case "oid": + return common.Maybe + default: + return common.No + } +} + +func (r *DefaultRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No) + default: + return common.No + } +} diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go new file mode 100644 index 000000000..2d3b20792 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/buckets.go @@ -0,0 +1,251 @@ +package tui + +import ( + "context" + "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +type BucketsView struct { + *tview.Box + + mu sync.Mutex + + view *tview.TreeView + nodeToUpdate *tview.TreeNode + + ui *UI + filter *Filter +} + +type bucketNode struct { + bucket *Bucket + filter *Filter +} + +func NewBucketsView(ui *UI, filter *Filter) *BucketsView { + return &BucketsView{ + Box: tview.NewBox(), + view: tview.NewTreeView(), + ui: ui, + filter: filter, + } +} + +func (v *BucketsView) Mount(_ context.Context) error { + root := tview.NewTreeNode(".") + root.SetExpanded(false) + root.SetSelectable(false) + root.SetReference(&bucketNode{ + bucket: &Bucket{NextParser: v.ui.rootParser}, + filter: v.filter, + }) + + v.nodeToUpdate = root + + v.view.SetRoot(root) + v.view.SetCurrentNode(root) + + return nil +} + +func (v *BucketsView) Update(ctx context.Context) error { + if v.nodeToUpdate == nil { + return nil + } + defer func() { v.nodeToUpdate = nil }() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ready := make(chan struct{}) + errCh := make(chan error) + + tmp := tview.NewTreeNode(v.nodeToUpdate.GetText()) + tmp.SetReference(v.nodeToUpdate.GetReference()) + + node := v.nodeToUpdate.GetReference().(*bucketNode) + + go func() { + defer close(ready) + + hasBuckets, err := HasBuckets(ctx, v.ui.db, node.bucket.Path) + if err != nil { + errCh <- err + } + + // Show the selected bucket's records instead. 
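// (Per the HasBuckets contract in db.go, a bucket holds either nested buckets
// or records, never both, so a leaf bucket can only be opened as a records page.)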
+ if !hasBuckets && node.bucket.NextParser != nil { + v.ui.moveNextPage(NewRecordsView(v.ui, node.bucket, node.filter)) + } + + if v.nodeToUpdate.IsExpanded() { + return + } + + err = v.loadNodeChildren(ctx, tmp, node.filter) + if err != nil { + errCh <- err + } + }() + + select { + case <-ctx.Done(): + case <-ready: + v.mu.Lock() + v.nodeToUpdate.SetChildren(tmp.GetChildren()) + v.nodeToUpdate.SetExpanded(!v.nodeToUpdate.IsExpanded()) + v.mu.Unlock() + case err := <-errCh: + return err + } + + return nil +} + +func (v *BucketsView) Unmount() { +} + +func (v *BucketsView) Draw(screen tcell.Screen) { + x, y, width, height := v.GetInnerRect() + v.view.SetRect(x, y, width, height) + + v.view.Draw(screen) +} + +func (v *BucketsView) loadNodeChildren( + ctx context.Context, node *tview.TreeNode, filter *Filter, +) error { + parentBucket := node.GetReference().(*bucketNode).bucket + + path := parentBucket.Path + parser := parentBucket.NextParser + + buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) + + for item := range buffer { + if item.err != nil { + return item.err + } + bucket := item.val + + var err error + bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil) + if err != nil { + return err + } + + satisfies, err := v.bucketSatisfiesFilter(ctx, bucket, filter) + if err != nil { + return err + } + if !satisfies { + continue + } + + child := tview.NewTreeNode(bucket.Entry.String()). + SetSelectable(true). + SetExpanded(false). + SetReference(&bucketNode{ + bucket: bucket, + filter: filter.Apply(bucket.Entry), + }) + + node.AddChild(child) + } + + return nil +} + +func (v *BucketsView) bucketSatisfiesFilter( + ctx context.Context, bucket *Bucket, filter *Filter, +) (bool, error) { + // Does the current bucket itself satisfy the filter? + filter = filter.Apply(bucket.Entry) + + if filter.Result() == common.Yes { + return true, nil + } + + if filter.Result() == common.No { + return false, nil + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Check the current bucket's nested buckets, if any exist. + bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) + + for item := range bucketsBuffer { + if item.err != nil { + return false, item.err + } + b := item.val + + var err error + b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil) + if err != nil { + return false, err + } + + satisfies, err := v.bucketSatisfiesFilter(ctx, b, filter) + if err != nil { + return false, err + } + if satisfies { + return true, nil + } + } + + // Check the current bucket's nested records, if any exist. + recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) + + for item := range recordsBuffer { + if item.err != nil { + return false, item.err + } + r := item.val + + var err error + r.Entry, _, err = bucket.NextParser(r.Key, r.Value) + if err != nil { + return false, err + } + + if filter.Apply(r.Entry).Result() == common.Yes { + return true, nil + } + } + + return false, nil +} + +func (v *BucketsView) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { + return v.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { + currentNode := v.view.GetCurrentNode() + if currentNode == nil { + return + } + + switch event.Key() { + case tcell.KeyEnter: + // Expand or collapse the selected bucket's nested buckets, + // otherwise, navigate to that bucket's records. + v.nodeToUpdate = currentNode + case tcell.KeyCtrlR: + // Navigate to the selected bucket's records. 
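// (Unlike Enter, Ctrl-R skips the expand/collapse toggle and always opens the records view.)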
+ bucketNode := currentNode.GetReference().(*bucketNode) + v.ui.moveNextPage(NewRecordsView(v.ui, bucketNode.bucket, bucketNode.filter)) + case tcell.KeyCtrlD: + // Navigate to the selected bucket's detailed view. + bucketNode := currentNode.GetReference().(*bucketNode) + v.ui.moveNextPage(NewDetailedView(bucketNode.bucket.Entry.DetailedString())) + default: + v.view.InputHandler()(event, func(tview.Primitive) {}) + } + }) +} diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go new file mode 100644 index 000000000..94fa87f98 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/db.go @@ -0,0 +1,151 @@ +package tui + +import ( + "context" + "errors" + "fmt" + + "go.etcd.io/bbolt" +) + +type Item[T any] struct { + val T + err error +} + +func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) { + if len(path) == 0 { + return nil, errors.New("can't find bucket without path") + } + + name := path[0] + bucket := tx.Bucket(name) + if bucket == nil { + return nil, fmt.Errorf("no bucket with name %s", name) + } + for _, name := range path[1:] { + bucket = bucket.Bucket(name) + if bucket == nil { + return nil, fmt.Errorf("no bucket with name %s", name) + } + } + return bucket, nil +} + +func load[T any]( + ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, + filter func(key, value []byte) bool, transform func(key, value []byte) T, +) <-chan Item[T] { + buffer := make(chan Item[T], bufferSize) + + go func() { + defer close(buffer) + + err := db.View(func(tx *bbolt.Tx) error { + var cursor *bbolt.Cursor + if len(path) == 0 { + cursor = tx.Cursor() + } else { + bucket, err := resolvePath(tx, path) + if err != nil { + buffer <- Item[T]{err: fmt.Errorf("can't find bucket: %w", err)} + return nil + } + cursor = bucket.Cursor() + } + + key, value := cursor.First() + for { + if key == nil { + return nil + } + if filter != nil && !filter(key, value) { + key, value = cursor.Next() + continue + } + + select { + case <-ctx.Done(): + return nil + case buffer <- Item[T]{val: transform(key, value)}: + key, value = cursor.Next() + } + } + }) + if err != nil { + buffer <- Item[T]{err: err} + } + }() + + return buffer +} + +func LoadBuckets( + ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, +) <-chan Item[*Bucket] { + buffer := load( + ctx, db, path, bufferSize, + func(_, value []byte) bool { + return value == nil + }, + func(key, _ []byte) *Bucket { + base := make([][]byte, 0, len(path)) + base = append(base, path...) + + return &Bucket{ + Name: key, + Path: append(base, key), + } + }, + ) + + return buffer +} + +func LoadRecords( + ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, +) <-chan Item[*Record] { + buffer := load( + ctx, db, path, bufferSize, + func(_, value []byte) bool { + return value != nil + }, + func(key, value []byte) *Record { + base := make([][]byte, 0, len(path)) + base = append(base, path...) + + return &Record{ + Key: key, + Value: value, + Path: append(base, key), + } + }, + ) + + return buffer +} + +// HasBuckets checks if a bucket has nested buckets. It relies on the assumption +// that a bucket can have either nested buckets or records but not both. 
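// For example, in the metabase schema the Graveyard bucket stores records
// directly, while Locked stores nested per-container buckets; assuming both are
// present in the inspected file, HasBuckets reports false for the former and
// true for the latter.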
+func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + buffer := load( + ctx, db, path, 1, + nil, + func(_, value []byte) []byte { return value }, + ) + + x, ok := <-buffer + if !ok { + return false, nil + } + if x.err != nil { + return false, x.err + } + if x.val != nil { + return false, nil + } + return true, nil +} diff --git a/cmd/frostfs-lens/internal/tui/detailed.go b/cmd/frostfs-lens/internal/tui/detailed.go new file mode 100644 index 000000000..b2d897230 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/detailed.go @@ -0,0 +1,24 @@ +package tui + +import ( + "context" + + "github.com/rivo/tview" +) + +type DetailedView struct { + *tview.TextView +} + +func NewDetailedView(detailed string) *DetailedView { + v := &DetailedView{ + TextView: tview.NewTextView(), + } + v.SetDynamicColors(true) + v.SetText(detailed) + return v +} + +func (v *DetailedView) Mount(_ context.Context) error { return nil } +func (v *DetailedView) Update(_ context.Context) error { return nil } +func (v *DetailedView) Unmount() {} diff --git a/cmd/frostfs-lens/internal/tui/filter.go b/cmd/frostfs-lens/internal/tui/filter.go new file mode 100644 index 000000000..e7879eca7 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/filter.go @@ -0,0 +1,44 @@ +package tui + +import ( + "maps" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" +) + +type Filter struct { + values map[string]any + results map[string]common.FilterResult +} + +func NewFilter(values map[string]any) *Filter { + f := &Filter{ + values: maps.Clone(values), + results: make(map[string]common.FilterResult), + } + for tag := range values { + f.results[tag] = common.No + } + return f +} + +func (f *Filter) Apply(e common.SchemaEntry) *Filter { + filter := &Filter{ + values: f.values, + results: maps.Clone(f.results), + } + + for tag, value := range filter.values { + filter.results[tag] = max(filter.results[tag], e.Filter(tag, value)) + } + + return filter +} + +func (f *Filter) Result() common.FilterResult { + current := common.Yes + for _, r := range f.results { + current = min(r, current) + } + return current +} diff --git a/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt new file mode 100644 index 000000000..c371b34e9 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt @@ -0,0 +1,38 @@ +[green::b]HOTKEYS[-::-] + + [green::b]Navigation[-::-] + + [yellow::b]Down Arrow[-::-] / [yellow::b]j[-::-] + Scroll down. + + [yellow::b]Up Arrow[-::-] / [yellow::b]k[-::-] + Scroll up. + + [yellow::b]Page Down[-::-] / [yellow::b]Ctrl-f[-::-] + Scroll down by a full page. + + [yellow::b]Page Up[-::-] / [yellow::b]Ctrl-b[-::-] + Scroll up by a full page. + + [green::b]Actions[-::-] + + [yellow::b]Enter[-::-] + Perform actions based on the current context: + - In Buckets View: + - Expand/collapse the selected bucket to show/hide its nested buckets. + - If no nested buckets exist, navigate to the selected bucket's records. + - In Records View: Open the detailed view of the selected record. + + [yellow::b]Escape[-::-] + Return to the previous page, opposite of [yellow::b]Enter[-::-]. + + Refer to the [green::b]SEARCHING[-::-] section for more specific actions. + + + [green::b]Alternative Action Hotkeys[-::-] + + [yellow::b]Ctrl-r[-::-] + Directly navigate to the selected bucket's records. + + [yellow::b]Ctrl-d[-::-] + Access the detailed view of the selected bucket. 
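The searching help added below documents the prompt syntax tag:value [+ tag:value]... For orientation, here is a minimal, hypothetical sketch of what a conforming prompt parser has to do; parsePrompt and the sample tag values are inventions of this note, not code from this patch (in the actual TUI, each value is additionally run through the per-tag parsers registered with UI.AddFilter before it reaches tui.NewFilter):

package main

import (
	"fmt"
	"strings"
)

// parsePrompt splits "tag:value [+ tag:value]..." into tag/value pairs.
// Leading and trailing whitespace is ignored, and an empty prompt means
// "no filters", mirroring the rules stated in the help text.
func parsePrompt(prompt string) (map[string]string, error) {
	pairs := make(map[string]string)
	prompt = strings.TrimSpace(prompt)
	if prompt == "" {
		return pairs, nil
	}
	for _, part := range strings.Split(prompt, "+") {
		tag, value, ok := strings.Cut(strings.TrimSpace(part), ":")
		if !ok {
			return nil, fmt.Errorf("invalid filter %q, expected tag:value", part)
		}
		pairs[tag] = value
	}
	return pairs, nil
}

func main() {
	pairs, err := parsePrompt("cid:abc + oid:def")
	if err != nil {
		panic(err)
	}
	fmt.Println(pairs) // map[cid:abc oid:def]
}

Because Filter.Result (tui/filter.go) takes the minimum across tags while Apply takes the per-tag maximum as entries accumulate down the tree, combining several tag:value pairs behaves as the intersection promised by the help text.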
diff --git a/cmd/frostfs-lens/internal/tui/help-pages/searching.txt b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt new file mode 100644 index 000000000..bc2be512b --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt @@ -0,0 +1,26 @@ +[green::b]SEARCHING[-::-] + + [green::b]Hotkeys[-::-] + + [yellow::b]/[-::-] + Initiate the search prompt. + - The prompt follows this syntax: [yellow::b]tag:value [+ tag:value]...[-::-] + - Multiple filters can be combined with [yellow::b]+[-::-]; the result is an intersection of those filters' result sets. + - Any leading and trailing whitespace will be ignored. + - An empty prompt will return all results with no filters applied. + - Refer to the [green::b]Available Search Filters[-::-] section below for a list of valid filter tags. + + [yellow::b]Enter[-::-] + Execute the search based on the entered prompt. + - If the prompt is invalid, an error message will be displayed. + + [yellow::b]Escape[-::-] + Exit the search prompt without performing a search. + + [yellow::b]Down Arrow[-::-], [yellow::b]Up Arrow[-::-] + Scroll through the search history. + + + [green::b]Available Search Filters[-::-] + +%s diff --git a/cmd/frostfs-lens/internal/tui/help.go b/cmd/frostfs-lens/internal/tui/help.go new file mode 100644 index 000000000..3ab8fede0 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/help.go @@ -0,0 +1,101 @@ +package tui + +import ( + _ "embed" + "fmt" + "strings" + + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +var ( + //go:embed help-pages/hotkeys.txt + hotkeysHelpText string + + //go:embed help-pages/searching.txt + searchingHelpText string +) + +type HelpPage struct { + *tview.Box + pages []*tview.TextView + currentPage int + + filters []string + filterHints map[string]string +} + +func NewHelpPage(filters []string, hints map[string]string) *HelpPage { + hp := &HelpPage{ + Box: tview.NewBox(), + filters: filters, + filterHints: hints, + } + + page := tview.NewTextView(). + SetDynamicColors(true). + SetText(hotkeysHelpText) + hp.addPage(page) + + page = tview.NewTextView(). + SetDynamicColors(true). 
+ SetText(fmt.Sprintf(searchingHelpText, hp.getFiltersText())) + hp.addPage(page) + + return hp +} + +func (hp *HelpPage) addPage(page *tview.TextView) { + hp.pages = append(hp.pages, page) +} + +func (hp *HelpPage) getFiltersText() string { + if len(hp.filters) == 0 { + return "\t\tNo filters defined.\n" + } + + filtersText := strings.Builder{} + gapSize := 4 + + tagMaxWidth := 3 + for _, filter := range hp.filters { + tagMaxWidth = max(tagMaxWidth, len(filter)) + } + filtersText.WriteString("\t\t[yellow::b]Tag") + filtersText.WriteString(strings.Repeat(" ", gapSize)) + filtersText.WriteString("\tValue[-::-]\n\n") + + for _, filter := range hp.filters { + filtersText.WriteString("\t\t") + filtersText.WriteString(filter) + filtersText.WriteString(strings.Repeat(" ", tagMaxWidth-len(filter)+gapSize)) + filtersText.WriteString(hp.filterHints[filter]) + filtersText.WriteRune('\n') + } + + return filtersText.String() +} + +func (hp *HelpPage) Draw(screen tcell.Screen) { + x, y, width, height := hp.GetInnerRect() + hp.pages[hp.currentPage].SetRect(x+1, y+1, width-2, height-2) + hp.pages[hp.currentPage].Draw(screen) +} + +func (hp *HelpPage) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { + return hp.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { + if event.Key() == tcell.KeyEnter { + hp.currentPage++ + hp.currentPage %= len(hp.pages) + return + } + hp.pages[hp.currentPage].InputHandler()(event, func(tview.Primitive) {}) + }) +} + +func (hp *HelpPage) MouseHandler() func(action tview.MouseAction, event *tcell.EventMouse, setFocus func(p tview.Primitive)) (consumed bool, capture tview.Primitive) { + return hp.WrapMouseHandler(func(action tview.MouseAction, event *tcell.EventMouse, _ func(tview.Primitive)) (consumed bool, capture tview.Primitive) { + return hp.pages[hp.currentPage].MouseHandler()(action, event, func(tview.Primitive) {}) + }) +} diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go new file mode 100644 index 000000000..471514e5d --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/input.go @@ -0,0 +1,80 @@ +package tui + +import ( + "slices" + + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +type InputFieldWithHistory struct { + *tview.InputField + history []string + historyLimit int + historyPointer int + currentContent string +} + +func NewInputFieldWithHistory(historyLimit int) *InputFieldWithHistory { + return &InputFieldWithHistory{ + InputField: tview.NewInputField(), + historyLimit: historyLimit, + } +} + +func (f *InputFieldWithHistory) AddToHistory(s string) { + // Any change to the history stops scrolling; the pointer is reset past the newest entry. + defer func() { f.historyPointer = len(f.history) }() + + // If an existing history entry was submitted again, move it to the most recent position and stop. + if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] { + f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1) + f.history = append(f.history, s) + return + } + + if len(f.history) == f.historyLimit { + f.history = f.history[1:] + } + f.history = append(f.history, s) +} + +func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { + return f.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { + switch event.Key() { + case tcell.KeyDown: + if len(f.history) == 0 { + return + } + // Scrolling has not started yet (Up was never pressed), so there is nothing more recent to show. + if f.historyPointer == len(f.history) { + return + } + // Move toward more recent prompts. 
+ f.historyPointer++ + // Stop iterating over history. + if f.historyPointer == len(f.history) { + f.SetText(f.currentContent) + return + } + f.SetText(f.history[f.historyPointer]) + case tcell.KeyUp: + if len(f.history) == 0 { + return + } + // Start iterating over history. + if f.historyPointer == len(f.history) { + f.currentContent = f.GetText() + } + // End of history. + if f.historyPointer == 0 { + return + } + // Iterate to least recent prompts. + f.historyPointer-- + f.SetText(f.history[f.historyPointer]) + default: + f.InputField.InputHandler()(event, func(tview.Primitive) {}) + } + }) +} diff --git a/cmd/frostfs-lens/internal/tui/loading.go b/cmd/frostfs-lens/internal/tui/loading.go new file mode 100644 index 000000000..4b9384ad4 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/loading.go @@ -0,0 +1,72 @@ +package tui + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +type LoadingBar struct { + *tview.Box + view *tview.TextView + secondsElapsed atomic.Int64 + needDrawFunc func() + reset func() +} + +func NewLoadingBar(needDrawFunc func()) *LoadingBar { + b := &LoadingBar{ + Box: tview.NewBox(), + view: tview.NewTextView(), + needDrawFunc: needDrawFunc, + } + b.view.SetBackgroundColor(tview.Styles.PrimaryTextColor) + b.view.SetTextColor(b.GetBackgroundColor()) + + return b +} + +func (b *LoadingBar) Start(ctx context.Context) { + ctx, b.reset = context.WithCancel(ctx) + + go func() { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + b.secondsElapsed.Store(0) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + b.secondsElapsed.Add(1) + b.needDrawFunc() + } + } + }() +} + +func (b *LoadingBar) Stop() { + b.reset() +} + +func (b *LoadingBar) Draw(screen tcell.Screen) { + seconds := b.secondsElapsed.Load() + + var time string + switch { + case seconds < 60: + time = fmt.Sprintf("%ds", seconds) + default: + time = fmt.Sprintf("%dm%ds", seconds/60, seconds%60) + } + b.view.SetText(fmt.Sprintf(" Loading... 
%s (press Escape to cancel) ", time)) + + x, y, width, _ := b.GetInnerRect() + b.view.SetRect(x, y, width, 1) + b.view.Draw(screen) +} diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go new file mode 100644 index 000000000..a4d392ab3 --- /dev/null +++ b/cmd/frostfs-lens/internal/tui/records.go @@ -0,0 +1,268 @@ +package tui + +import ( + "context" + "errors" + "fmt" + "math" + "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" +) + +type updateType int + +const ( + other updateType = iota + moveToPrevPage + moveToNextPage + moveUp + moveDown + moveHome + moveEnd +) + +type RecordsView struct { + *tview.Box + + mu sync.RWMutex + + onUnmount func() + + bucket *Bucket + records []*Record + + buffer chan *Record + + firstRecordIndex int + lastRecordIndex int + selectedRecordIndex int + + updateType updateType + + ui *UI + filter *Filter +} + +func NewRecordsView(ui *UI, bucket *Bucket, filter *Filter) *RecordsView { + return &RecordsView{ + Box: tview.NewBox(), + bucket: bucket, + ui: ui, + filter: filter, + } +} + +func (v *RecordsView) Mount(ctx context.Context) error { + if v.onUnmount != nil { + return errors.New("try to mount already mounted component") + } + + ctx, v.onUnmount = context.WithCancel(ctx) + + tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) + + v.buffer = make(chan *Record, v.ui.loadBufferSize) + go func() { + defer close(v.buffer) + + for item := range tempBuffer { + if item.err != nil { + v.ui.stopOnError(item.err) + break + } + record := item.val + + var err error + record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value) + if err != nil { + v.ui.stopOnError(err) + break + } + + if v.filter.Apply(record.Entry).Result() != common.Yes { + continue + } + + v.buffer <- record + } + }() + + return nil +} + +func (v *RecordsView) Unmount() { + assert.False(v.onUnmount == nil, "try to unmount not mounted component") + v.onUnmount() + v.onUnmount = nil +} + +func (v *RecordsView) Update(ctx context.Context) error { + _, _, _, recordsPerPage := v.GetInnerRect() + firstRecordIndex, lastRecordIndex, selectedRecordIndex := v.getNewIndexes() + +loop: + for len(v.records) < lastRecordIndex { + select { + case <-ctx.Done(): + return nil + case record, ok := <-v.buffer: + if !ok { + break loop + } + v.records = append(v.records, record) + } + } + + // Set the update type to its default value after some specific key event + // has been handled. 
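// (getNewIndexes above has already translated the previous key event into the new indexes.)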
+ v.updateType = other + + firstRecordIndex = max(0, min(firstRecordIndex, len(v.records)-recordsPerPage)) + lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records)) + selectedRecordIndex = min(selectedRecordIndex, lastRecordIndex-1) + + v.mu.Lock() + v.firstRecordIndex = firstRecordIndex + v.lastRecordIndex = lastRecordIndex + v.selectedRecordIndex = selectedRecordIndex + v.mu.Unlock() + + return nil +} + +func (v *RecordsView) getNewIndexes() (int, int, int) { + v.mu.RLock() + firstRecordIndex := v.firstRecordIndex + lastRecordIndex := v.lastRecordIndex + selectedRecordIndex := v.selectedRecordIndex + v.mu.RUnlock() + + _, _, _, recordsPerPage := v.GetInnerRect() + + switch v.updateType { + case moveUp: + if selectedRecordIndex != firstRecordIndex { + selectedRecordIndex-- + break + } + firstRecordIndex = max(0, firstRecordIndex-1) + lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records)) + selectedRecordIndex = firstRecordIndex + case moveToPrevPage: + if selectedRecordIndex != firstRecordIndex { + selectedRecordIndex = firstRecordIndex + break + } + firstRecordIndex = max(0, firstRecordIndex-recordsPerPage) + lastRecordIndex = firstRecordIndex + recordsPerPage + selectedRecordIndex = firstRecordIndex + case moveDown: + if selectedRecordIndex != lastRecordIndex-1 { + selectedRecordIndex++ + break + } + firstRecordIndex++ + lastRecordIndex++ + selectedRecordIndex++ + case moveToNextPage: + if selectedRecordIndex != lastRecordIndex-1 { + selectedRecordIndex = lastRecordIndex - 1 + break + } + firstRecordIndex += recordsPerPage + lastRecordIndex = firstRecordIndex + recordsPerPage + selectedRecordIndex = lastRecordIndex - 1 + case moveHome: + firstRecordIndex = 0 + lastRecordIndex = firstRecordIndex + recordsPerPage + selectedRecordIndex = 0 + case moveEnd: + lastRecordIndex = math.MaxInt32 + firstRecordIndex = lastRecordIndex - recordsPerPage + selectedRecordIndex = lastRecordIndex - 1 + default: + lastRecordIndex = firstRecordIndex + recordsPerPage + } + + return firstRecordIndex, lastRecordIndex, selectedRecordIndex +} + +func (v *RecordsView) GetInnerRect() (int, int, int, int) { + x, y, width, height := v.Box.GetInnerRect() + + // Left padding. + x = min(x+3, x+width-1) + width = max(width-3, 0) + + return x, y, width, height +} + +func (v *RecordsView) Draw(screen tcell.Screen) { + v.mu.RLock() + firstRecordIndex := v.firstRecordIndex + lastRecordIndex := v.lastRecordIndex + selectedRecordIndex := v.selectedRecordIndex + records := v.records + v.mu.RUnlock() + + v.DrawForSubclass(screen, v) + + x, y, width, height := v.GetInnerRect() + if height == 0 { + return + } + + // No records in that bucket. 
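// (Either the bucket itself is empty or the filter applied in Mount matched nothing.)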
+	if firstRecordIndex == lastRecordIndex {
+		tview.Print(
+			screen, "Empty Bucket", x, y, width, tview.AlignCenter, tview.Styles.PrimaryTextColor,
+		)
+		return
+	}
+
+	for index := firstRecordIndex; index < lastRecordIndex; index++ {
+		result := records[index].Entry
+		text := result.String()
+
+		if index == selectedRecordIndex {
+			text = fmt.Sprintf("[:white]%s[:-]", text)
+			tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimitiveBackgroundColor)
+		} else {
+			tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimaryTextColor)
+		}
+
+		y++
+	}
+}
+
+func (v *RecordsView) InputHandler() func(event *tcell.EventKey, _ func(p tview.Primitive)) {
+	return v.WrapInputHandler(func(event *tcell.EventKey, _ func(p tview.Primitive)) {
+		switch m, k := event.Modifiers(), event.Key(); {
+		case m == 0 && k == tcell.KeyPgUp:
+			v.updateType = moveToPrevPage
+		case m == 0 && k == tcell.KeyPgDn:
+			v.updateType = moveToNextPage
+		case m == 0 && k == tcell.KeyUp:
+			v.updateType = moveUp
+		case m == 0 && k == tcell.KeyDown:
+			v.updateType = moveDown
+		case m == 0 && k == tcell.KeyHome:
+			v.updateType = moveHome
+		case m == 0 && k == tcell.KeyEnd:
+			v.updateType = moveEnd
+		case k == tcell.KeyEnter:
+			v.mu.RLock()
+			selectedRecordIndex := v.selectedRecordIndex
+			records := v.records
+			v.mu.RUnlock()
+			if len(records) != 0 {
+				current := records[selectedRecordIndex]
+				v.ui.moveNextPage(NewDetailedView(current.Entry.DetailedString()))
+			}
+		}
+	})
+}
diff --git a/cmd/frostfs-lens/internal/tui/types.go b/cmd/frostfs-lens/internal/tui/types.go
new file mode 100644
index 000000000..4a227fe64
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/types.go
@@ -0,0 +1,18 @@
+package tui
+
+import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+)
+
+type Bucket struct {
+	Name       []byte
+	Path       [][]byte
+	Entry      common.SchemaEntry
+	NextParser common.Parser
+}
+
+type Record struct {
+	Key, Value []byte
+	Path       [][]byte
+	Entry      common.SchemaEntry
+}
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
new file mode 100644
index 000000000..cc6b7859e
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -0,0 +1,561 @@
+package tui
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+	"github.com/davecgh/go-spew/spew"
+	"github.com/gdamore/tcell/v2"
+	"github.com/rivo/tview"
+	"go.etcd.io/bbolt"
+)
+
+type Config struct {
+	LoadBufferSize      int
+	SearchHistorySize   int
+	LoadingIndicatorLag time.Duration
+}
+
+var DefaultConfig = Config{
+	LoadBufferSize:      100,
+	SearchHistorySize:   100,
+	LoadingIndicatorLag: 500 * time.Millisecond,
+}
+
+type Primitive interface {
+	tview.Primitive
+
+	Mount(ctx context.Context) error
+	Update(ctx context.Context) error
+	Unmount()
+}
+
+type UI struct {
+	*tview.Box
+
+	// A context is needed while updating pages that read data from the
+	// database. It is shared among all mounts and updates. The current TUI
+	// library doesn't use contexts at all, so that is implemented here.
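Aside: every page satisfies the Primitive interface above, so the UI drives one uniform mount, update, draw, unmount lifecycle from a single shared context. A sketch of how a caller might drive such a lifecycle (the page type here is hypothetical, standing in for BucketsView or RecordsView):

package main

import (
	"context"
	"fmt"
)

// page is a stand-in for a real view; only the lifecycle matters here.
type page struct{ name string }

func (p *page) Mount(_ context.Context) error  { fmt.Println("mount", p.name); return nil }
func (p *page) Update(_ context.Context) error { fmt.Println("update", p.name); return nil }
func (p *page) Unmount()                       { fmt.Println("unmount", p.name) }

func main() {
	// One context is shared by the mount and every later update, as the
	// comment above describes; cancelling it aborts background loads.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	p := &page{name: "records"}
	if err := p.Mount(ctx); err != nil {
		return
	}
	defer p.Unmount()
	_ = p.Update(ctx) // called again on every redraw that needs fresh data
}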
+ //nolint:containedctx + ctx context.Context + onStop func() + + app *tview.Application + db *bbolt.DB + + pageHistory []Primitive + mountedPage Primitive + + pageToMount Primitive + + pageStub tview.Primitive + + infoBar *tview.TextView + searchBar *InputFieldWithHistory + loadingBar *LoadingBar + helpBar *tview.TextView + + helpPage *HelpPage + + searchErrorBar *tview.TextView + + isSearching bool + isLoading atomic.Bool + isShowingError bool + isShowingHelp bool + + loadBufferSize int + + rootParser common.Parser + + loadingIndicatorLag time.Duration + + cancelLoading func() + + filters map[string]func(string) (any, error) + compositeFilters map[string]func(string) (map[string]any, error) + filterHints map[string]string +} + +func NewUI( + ctx context.Context, + app *tview.Application, + db *bbolt.DB, + rootParser common.Parser, + cfg *Config, +) *UI { + spew.Config.DisableMethods = true + + if cfg == nil { + cfg = &DefaultConfig + } + + ui := &UI{ + Box: tview.NewBox(), + + app: app, + db: db, + rootParser: rootParser, + + filters: make(map[string]func(string) (any, error)), + compositeFilters: make(map[string]func(string) (map[string]any, error)), + filterHints: make(map[string]string), + + loadBufferSize: cfg.LoadBufferSize, + loadingIndicatorLag: cfg.LoadingIndicatorLag, + } + + ui.ctx, ui.onStop = context.WithCancel(ctx) + + backgroundColor := ui.GetBackgroundColor() + textColor := tview.Styles.PrimaryTextColor + + inverseBackgroundColor := textColor + inverseTextColor := backgroundColor + + alertTextColor := tcell.ColorRed + + ui.pageStub = tview.NewBox() + + ui.infoBar = tview.NewTextView() + ui.infoBar.SetBackgroundColor(inverseBackgroundColor) + ui.infoBar.SetTextColor(inverseTextColor) + ui.infoBar.SetText( + fmt.Sprintf(" %s (press h for help, q to quit) ", db.Path()), + ) + + ui.searchBar = NewInputFieldWithHistory(cfg.SearchHistorySize) + ui.searchBar.SetFieldBackgroundColor(backgroundColor) + ui.searchBar.SetFieldTextColor(textColor) + ui.searchBar.SetLabelColor(textColor) + ui.searchBar.Focus(nil) + ui.searchBar.SetLabel("/") + + ui.searchErrorBar = tview.NewTextView() + ui.searchErrorBar.SetBackgroundColor(backgroundColor) + ui.searchErrorBar.SetTextColor(alertTextColor) + + ui.helpBar = tview.NewTextView() + ui.helpBar.SetBackgroundColor(inverseBackgroundColor) + ui.helpBar.SetTextColor(inverseTextColor) + ui.helpBar.SetText(" Press Enter for next page or Escape to exit help ") + + ui.loadingBar = NewLoadingBar(ui.triggerDraw) + + ui.pageToMount = NewBucketsView(ui, NewFilter(nil)) + + return ui +} + +func (ui *UI) checkFilterExists(typ string) bool { + if _, ok := ui.filters[typ]; ok { + return true + } + if _, ok := ui.compositeFilters[typ]; ok { + return true + } + return false +} + +func (ui *UI) AddFilter( + typ string, + parser func(string) (any, error), + helpHint string, +) error { + if ui.checkFilterExists(typ) { + return fmt.Errorf("filter %s already exists", typ) + } + ui.filters[typ] = parser + ui.filterHints[typ] = helpHint + return nil +} + +func (ui *UI) AddCompositeFilter( + typ string, + parser func(string) (map[string]any, error), + helpHint string, +) error { + if ui.checkFilterExists(typ) { + return fmt.Errorf("filter %s already exists", typ) + } + ui.compositeFilters[typ] = parser + ui.filterHints[typ] = helpHint + return nil +} + +func (ui *UI) stopOnError(err error) { + if err != nil { + ui.onStop() + ui.app.QueueEvent(tcell.NewEventError(err)) + } +} + +func (ui *UI) stop() { + ui.onStop() + ui.app.Stop() +} + +func (ui *UI) movePrevPage() { + 
if len(ui.pageHistory) != 0 { + ui.mountedPage.Unmount() + ui.mountedPage = ui.pageHistory[len(ui.pageHistory)-1] + ui.pageHistory = ui.pageHistory[:len(ui.pageHistory)-1] + ui.triggerDraw() + } +} + +func (ui *UI) moveNextPage(page Primitive) { + ui.pageToMount = page + ui.triggerDraw() +} + +func (ui *UI) triggerDraw() { + go ui.app.QueueUpdateDraw(func() {}) +} + +func (ui *UI) Draw(screen tcell.Screen) { + if ui.isLoading.Load() { + ui.draw(screen) + return + } + + ui.isLoading.Store(true) + + ctx, cancel := context.WithCancel(ui.ctx) + + ready := make(chan struct{}) + go func() { + ui.load(ctx) + + cancel() + close(ready) + ui.isLoading.Store(false) + }() + + select { + case <-ready: + case <-time.After(ui.loadingIndicatorLag): + ui.loadingBar.Start(ui.ctx) + ui.cancelLoading = cancel + + go func() { + <-ready + ui.loadingBar.Stop() + ui.triggerDraw() + }() + } + + ui.draw(screen) +} + +func (ui *UI) load(ctx context.Context) { + if ui.mountedPage == nil && ui.pageToMount == nil { + ui.stop() + return + } + + if ui.pageToMount != nil { + ui.mountAndUpdate(ctx) + } else { + ui.update(ctx) + } +} + +func (ui *UI) draw(screen tcell.Screen) { + ui.DrawForSubclass(screen, ui) + x, y, width, height := ui.GetInnerRect() + + var ( + pageToDraw tview.Primitive + barToDraw tview.Primitive + ) + + switch { + case ui.isShowingHelp: + if ui.helpPage == nil { + var filters []string + for f := range ui.filters { + filters = append(filters, f) + } + for f := range ui.compositeFilters { + filters = append(filters, f) + } + ui.helpPage = NewHelpPage(filters, ui.filterHints) + } + pageToDraw = ui.helpPage + case ui.mountedPage != nil: + pageToDraw = ui.mountedPage + default: + pageToDraw = ui.pageStub + } + + pageToDraw.SetRect(x, y, width, height-1) + pageToDraw.Draw(screen) + + // Search bar uses cursor and we need to hide it when another bar is drawn. + screen.HideCursor() + + switch { + case ui.isLoading.Load(): + barToDraw = ui.loadingBar + case ui.isSearching: + barToDraw = ui.searchBar + case ui.isShowingError: + barToDraw = ui.searchErrorBar + case ui.isShowingHelp: + barToDraw = ui.helpBar + default: + barToDraw = ui.infoBar + } + + barToDraw.SetRect(x, y+height-1, width, 1) + barToDraw.Draw(screen) +} + +func (ui *UI) mountAndUpdate(ctx context.Context) { + defer func() { + // Operation succeeded or was canceled, either way reset page to mount. + ui.pageToMount = nil + }() + + // Mount should use app global context. + //nolint:contextcheck + err := ui.pageToMount.Mount(ui.ctx) + if err != nil { + ui.stopOnError(err) + return + } + + x, y, width, height := ui.GetInnerRect() + ui.pageToMount.SetRect(x, y, width, height-1) + + s := loadOp(ctx, ui.pageToMount.Update) + if s.err != nil { + ui.pageToMount.Unmount() + ui.stopOnError(s.err) + return + } + // Update was canceled. 
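Aside: Draw above shows the loading bar only when a load outlives loadingIndicatorLag, which avoids a spinner flash on fast queries. The core of that pattern, extracted into a runnable sketch (the durations are illustrative, not the configured defaults):

package main

import (
	"fmt"
	"time"
)

func main() {
	const lag = 100 * time.Millisecond // stands in for loadingIndicatorLag

	ready := make(chan struct{})
	go func() {
		time.Sleep(300 * time.Millisecond) // simulated slow page load
		close(ready)
	}()

	select {
	case <-ready:
		// Fast path: done before the lag expired, no indicator at all.
	case <-time.After(lag):
		fmt.Println("show loading bar")
		<-ready
		fmt.Println("hide loading bar")
	}
}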
+ if !s.done { + ui.pageToMount.Unmount() + return + } + + if ui.mountedPage != nil { + ui.pageHistory = append(ui.pageHistory, ui.mountedPage) + } + ui.mountedPage = ui.pageToMount +} + +func (ui *UI) update(ctx context.Context) { + x, y, width, height := ui.GetInnerRect() + ui.mountedPage.SetRect(x, y, width, height-1) + + s := loadOp(ctx, ui.mountedPage.Update) + if s.err != nil { + ui.stopOnError(s.err) + return + } +} + +type status struct { + done bool + err error +} + +func loadOp(ctx context.Context, op func(ctx context.Context) error) status { + errCh := make(chan error) + go func() { + errCh <- op(ctx) + }() + + select { + case <-ctx.Done(): + return status{done: false, err: nil} + case err := <-errCh: + return status{done: true, err: err} + } +} + +func (ui *UI) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { + return ui.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { + switch { + case ui.isLoading.Load(): + ui.handleInputOnLoading(event) + case ui.isShowingHelp: + ui.handleInputOnShowingHelp(event) + case ui.isShowingError: + ui.handleInputOnShowingError() + case ui.isSearching: + ui.handleInputOnSearching(event) + default: + ui.handleInput(event) + } + }) +} + +func (ui *UI) handleInput(event *tcell.EventKey) { + m, k, r := event.Modifiers(), event.Key(), event.Rune() + + switch { + case k == tcell.KeyEsc: + ui.movePrevPage() + case m == 0 && k == tcell.KeyRune && r == 'h': + ui.isShowingHelp = true + case m == 0 && k == tcell.KeyRune && r == '/': + ui.isSearching = true + case m == 0 && k == tcell.KeyRune && r == 'q': + ui.stop() + default: + if ui.mountedPage != nil { + ui.mountedPage.InputHandler()(event, func(tview.Primitive) {}) + } + } +} + +func (ui *UI) handleInputOnLoading(event *tcell.EventKey) { + switch k, r := event.Key(), event.Rune(); { + case k == tcell.KeyEsc: + ui.cancelLoading() + case k == tcell.KeyRune && r == 'q': + ui.stop() + } +} + +func (ui *UI) handleInputOnShowingError() { + ui.isShowingError = false + ui.isSearching = true +} + +func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) { + k, r := event.Key(), event.Rune() + + switch { + case k == tcell.KeyEsc: + ui.isShowingHelp = false + case k == tcell.KeyRune && r == 'q': + ui.stop() + default: + ui.helpPage.InputHandler()(event, func(tview.Primitive) {}) + } +} + +func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { + m, k := event.Modifiers(), event.Key() + + switch { + case k == tcell.KeyEnter: + prompt := ui.searchBar.GetText() + + res, err := ui.processPrompt(prompt) + if err != nil { + ui.isShowingError = true + ui.isSearching = false + ui.searchErrorBar.SetText(err.Error() + " (press any key to continue)") + return + } + + switch v := ui.mountedPage.(type) { + case *BucketsView: + ui.moveNextPage(NewBucketsView(ui, res)) + case *RecordsView: + bucket := v.bucket + ui.moveNextPage(NewRecordsView(ui, bucket, res)) + } + + if ui.searchBar.GetText() != "" { + ui.searchBar.AddToHistory(ui.searchBar.GetText()) + } + + ui.searchBar.SetText("") + ui.isSearching = false + case k == tcell.KeyEsc: + ui.isSearching = false + case (k == tcell.KeyBackspace2 || m&tcell.ModCtrl != 0 && k == tcell.KeyETB) && len(ui.searchBar.GetText()) == 0: + ui.isSearching = false + default: + ui.searchBar.InputHandler()(event, func(tview.Primitive) {}) + } + + ui.MouseHandler() +} + +func (ui *UI) WithPrompt(prompt string) error { + filter, err := ui.processPrompt(prompt) + if err != nil { + return err + } + + ui.pageToMount = NewBucketsView(ui, 
filter)
+
+	if prompt != "" {
+		ui.searchBar.AddToHistory(prompt)
+	}
+
+	return nil
+}
+
+func (ui *UI) processPrompt(prompt string) (filter *Filter, err error) {
+	if prompt == "" {
+		return NewFilter(nil), nil
+	}
+
+	filterMap := make(map[string]any)
+
+	for _, filterString := range strings.Split(prompt, "+") {
+		parts := strings.Split(filterString, ":")
+		if len(parts) != 2 {
+			return nil, errors.New("expected 'tag:value [+ tag:value]...'")
+		}
+
+		filterTag := strings.TrimSpace(parts[0])
+		filterValueString := strings.TrimSpace(parts[1])
+
+		if _, exists := filterMap[filterTag]; exists {
+			return nil, fmt.Errorf("duplicate filter tag '%s'", filterTag)
+		}
+
+		parser, ok := ui.filters[filterTag]
+		if ok {
+			filterValue, err := parser(filterValueString)
+			if err != nil {
+				return nil, fmt.Errorf("can't parse '%s' filter value: %w", filterTag, err)
+			}
+
+			filterMap[filterTag] = filterValue
+			continue
+		}
+
+		compositeParser, ok := ui.compositeFilters[filterTag]
+		if ok {
+			compositeFilterValue, err := compositeParser(filterValueString)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"can't parse '%s' filter value '%s': %w",
+					filterTag, filterValueString, err,
+				)
+			}
+
+			for tag, value := range compositeFilterValue {
+				if _, exists := filterMap[tag]; exists {
+					return nil, fmt.Errorf(
+						"found duplicate filter tag '%s' while processing composite filter with tag '%s'",
+						tag, filterTag,
+					)
+				}
+
+				filterMap[tag] = value
+			}
+			continue
+		}
+
+		return nil, fmt.Errorf("unknown filter tag '%s'", filterTag)
+	}
+
+	return NewFilter(filterMap), nil
+}
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
new file mode 100644
index 000000000..2d1ab3e33
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/util.go
@@ -0,0 +1,110 @@
+package tui
+
+import (
+	"errors"
+	"strings"
+	"time"
+
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/mr-tron/base58"
+	"go.etcd.io/bbolt"
+)
+
+func OpenDB(path string, writable bool) (*bbolt.DB, error) {
+	db, err := bbolt.Open(path, 0o600, &bbolt.Options{
+		ReadOnly: !writable,
+		Timeout:  100 * time.Millisecond,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return db, nil
+}
+
+func CIDParser(s string) (any, error) {
+	data, err := base58.Decode(s)
+	if err != nil {
+		return nil, err
+	}
+	var id cid.ID
+	if err = id.Decode(data); err != nil {
+		return nil, err
+	}
+	return id, nil
+}
+
+func OIDParser(s string) (any, error) {
+	data, err := base58.Decode(s)
+	if err != nil {
+		return nil, err
+	}
+	var id oid.ID
+	if err = id.Decode(data); err != nil {
+		return nil, err
+	}
+	return id, nil
+}
+
+func AddressParser(s string) (map[string]any, error) {
+	m := make(map[string]any)
+
+	parts := strings.Split(s, "/")
+	if len(parts) != 2 {
+		return nil, errors.New("expected <cid>/<oid>")
+	}
+	cnr, err := CIDParser(parts[0])
+	if err != nil {
+		return nil, err
+	}
+	obj, err := OIDParser(parts[1])
+	if err != nil {
+		return nil, err
+	}
+
+	m["cid"] = cnr
+	m["oid"] = obj
+
+	return m, nil
+}
+
+func keyParser(s string) (any, error) {
+	if s == "" {
+		return nil, errors.New("empty attribute key")
+	}
+	return s, nil
+}
+
+func valueParser(s string) (any, error) {
+	if s == "" {
+		return nil, errors.New("empty attribute value")
+	}
+	return s, nil
+}
+
+func AttributeParser(s string) (map[string]any, error) {
+	m := make(map[string]any)
+
+	parts := strings.Split(s, "/")
+	if len(parts) != 1 && len(parts) != 2 {
+		return nil, errors.New("expected <key> or <key>/<value>")
or /") + } + + key, err := keyParser(parts[0]) + if err != nil { + return nil, err + } + m["key"] = key + + if len(parts) == 1 { + return m, nil + } + + value, err := valueParser(parts[1]) + if err != nil { + return nil, err + } + m["value"] = value + + return m, nil +} diff --git a/cmd/frostfs-lens/internal/writecache/root.go b/cmd/frostfs-lens/internal/writecache/root.go index eb3b325b6..d7d6db240 100644 --- a/cmd/frostfs-lens/internal/writecache/root.go +++ b/cmd/frostfs-lens/internal/writecache/root.go @@ -17,5 +17,5 @@ var Root = &cobra.Command{ } func init() { - Root.AddCommand(listCMD, inspectCMD) + Root.AddCommand(listCMD, inspectCMD, tuiCMD) } diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go new file mode 100644 index 000000000..b7e4d7c96 --- /dev/null +++ b/cmd/frostfs-lens/internal/writecache/tui.go @@ -0,0 +1,68 @@ +package writecache + +import ( + "context" + "fmt" + + common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" + schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" + "github.com/rivo/tview" + "github.com/spf13/cobra" +) + +var tuiCMD = &cobra.Command{ + Use: "explore", + Short: "Write cache exploration with a terminal UI", + Long: `Launch a terminal UI to explore write cache and search for data. + +Available search filters: +- cid CID +- oid OID +- addr CID/OID +`, + Run: tuiFunc, +} + +var initialPrompt string + +func init() { + common.AddComponentPathFlag(tuiCMD, &vPath) + + tuiCMD.Flags().StringVar( + &initialPrompt, + "filter", + "", + "Filter prompt to start with, format 'tag:value [+ tag:value]...'", + ) +} + +func tuiFunc(cmd *cobra.Command, _ []string) { + common.ExitOnErr(cmd, runTUI(cmd)) +} + +func runTUI(cmd *cobra.Command) error { + db, err := tui.OpenDB(vPath, false) + if err != nil { + return fmt.Errorf("couldn't open database: %w", err) + } + defer db.Close() + + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + app := tview.NewApplication() + ui := tui.NewUI(ctx, app, db, schema.WritecacheParser, nil) + + _ = ui.AddFilter("cid", tui.CIDParser, "CID") + _ = ui.AddFilter("oid", tui.OIDParser, "OID") + _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID") + + err = ui.WithPrompt(initialPrompt) + if err != nil { + return fmt.Errorf("invalid filter prompt: %w", err) + } + + app.SetRoot(ui, true).SetFocus(ui) + return app.Run() +} diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go index ec737f8a0..2d52e0c56 100644 --- a/cmd/frostfs-node/accounting.go +++ b/cmd/frostfs-node/accounting.go @@ -3,19 +3,18 @@ package main import ( "context" "net" + "strings" - accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc" accountingService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" accounting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting/morph" + accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc" "google.golang.org/grpc" ) func initAccountingService(ctx context.Context, c *cfg) { - if c.cfgMorph.client == nil { - initMorphComponents(ctx, c) - } + c.initMorphComponents(ctx) balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, 
c.cfgAccounting.scriptHash, 0) fatalOnErr(err) @@ -32,5 +31,27 @@ func initAccountingService(ctx context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { accountingGRPC.RegisterAccountingServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(accountingGRPC.AccountingService_ServiceDesc), server) }) } + +// frostFSServiceDesc creates a service descriptor with the new namespace for dual service support. +func frostFSServiceDesc(sd grpc.ServiceDesc) *grpc.ServiceDesc { + sdLegacy := new(grpc.ServiceDesc) + *sdLegacy = sd + + const ( + legacyNamespace = "neo.fs.v2" + apemanagerLegacyNamespace = "frostfs.v2" + newNamespace = "frost.fs" + ) + + if strings.HasPrefix(sd.ServiceName, legacyNamespace) { + sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, legacyNamespace, newNamespace) + } else if strings.HasPrefix(sd.ServiceName, apemanagerLegacyNamespace) { + sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, apemanagerLegacyNamespace, newNamespace) + } + return sdLegacy +} diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index 79c45c254..513314712 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -3,22 +3,23 @@ package main import ( "net" - apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc" ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage" morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" apemanager_transport "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/apemanager/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager" + apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" "google.golang.org/grpc" ) func initAPEManagerService(c *cfg) { contractStorage := ape_contract.NewProxyVerificationContractStorage( morph.NewSwitchRPCGuardedActor(c.cfgMorph.client), - c.shared.key, + c.key, c.cfgMorph.proxyScriptHash, c.cfgObject.cfgAccessPolicyEngine.policyContractHash) execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage, + c.cfgMorph.client, apemanager.WithLogger(c.log)) sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc) auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit) @@ -26,5 +27,8 @@ func initAPEManagerService(c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { apemanager_grpc.RegisterAPEManagerServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. 
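Aside: frostFSServiceDesc (defined in accounting.go above) registers each service a second time under the new frost.fs namespace, so both old and new clients can resolve it during the transition. The prefix rewrite it performs, illustrated in isolation (the service names are examples of the rule, not an exhaustive list):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Legacy neo.fs.v2 services are re-published under frost.fs.
	fmt.Println(strings.ReplaceAll(
		"neo.fs.v2.accounting.AccountingService", "neo.fs.v2", "frost.fs"))
	// frost.fs.accounting.AccountingService

	// APE manager lives under frostfs.v2, which maps the same way.
	fmt.Println(strings.ReplaceAll(
		"frostfs.v2.apemanager.APEManagerService", "frostfs.v2", "frost.fs"))
	// frost.fs.apemanager.APEManagerService
}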
+ s.RegisterService(frostFSServiceDesc(apemanager_grpc.APEManagerService_ServiceDesc), server) }) } diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go index 64c3beba7..ce8ae9662 100644 --- a/cmd/frostfs-node/attributes.go +++ b/cmd/frostfs-node/attributes.go @@ -6,9 +6,5 @@ import ( ) func parseAttributes(c *cfg) { - if nodeconfig.Relay(c.appCfg) { - return - } - fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg))) } diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index 81d552729..e5df0a22d 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -1,22 +1,30 @@ package main import ( + "bytes" + "cmp" + "context" + "slices" "sync" + "sync/atomic" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/hashicorp/golang-lru/v2/simplelru" + "go.uber.org/zap" ) -type netValueReader[K any, V any] func(K) (V, error) +type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) type valueWithError[V any] struct { v V @@ -49,7 +57,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n // updates the value from the network on cache miss or by TTL. // // returned value should not be modified. -func (c *ttlNetCache[K, V]) get(key K) (V, error) { +func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) { hit := false startedAt := time.Now() defer func() { @@ -71,7 +79,7 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) { return val.v, val.e } - v, err := c.netRdr(key) + v, err := c.netRdr(ctx, key) c.cache.Add(key, &valueWithError[V]{ v: v, @@ -109,55 +117,6 @@ func (c *ttlNetCache[K, V]) remove(key K) { hit = c.cache.Remove(key) } -// entity that provides LRU cache interface. -type lruNetCache struct { - cache *lru.Cache[uint64, *netmapSDK.NetMap] - - netRdr netValueReader[uint64, *netmapSDK.NetMap] - - metrics cacheMetrics -} - -// newNetworkLRUCache returns wrapper over netValueReader with LRU cache. -func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache { - cache, err := lru.New[uint64, *netmapSDK.NetMap](sz) - fatalOnErr(err) - - return &lruNetCache{ - cache: cache, - netRdr: netRdr, - metrics: metrics, - } -} - -// reads value by the key. -// -// updates the value from the network on cache miss. -// -// returned value should not be modified. 
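Aside: the surviving ttlNetCache above now threads a context through its netValueReader, so a cache miss is cancelled together with the request that triggered it. A minimal read-through TTL cache in the same spirit (a single map, no locking or metrics; all names here are illustrative, not the node's actual types):

package main

import (
	"context"
	"fmt"
	"time"
)

type entry[V any] struct {
	v       V
	expires time.Time
}

type ttlCache[K comparable, V any] struct {
	ttl   time.Duration
	data  map[K]entry[V]
	fetch func(context.Context, K) (V, error)
}

func (c *ttlCache[K, V]) get(ctx context.Context, k K) (V, error) {
	if e, ok := c.data[k]; ok && time.Now().Before(e.expires) {
		return e.v, nil // hit: no network round trip
	}
	v, err := c.fetch(ctx, k) // miss: read through, honoring ctx
	if err != nil {
		var zero V
		return zero, err
	}
	c.data[k] = entry[V]{v: v, expires: time.Now().Add(c.ttl)}
	return v, nil
}

func main() {
	c := &ttlCache[string, int]{
		ttl:  time.Minute,
		data: map[string]entry[int]{},
		fetch: func(_ context.Context, k string) (int, error) {
			fmt.Println("network read for", k)
			return len(k), nil
		},
	}
	v, _ := c.get(context.Background(), "epoch") // network read
	v, _ = c.get(context.Background(), "epoch")  // served from cache
	fmt.Println(v)
}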
-func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) { - hit := false - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - val, ok := c.cache.Get(key) - if ok { - hit = true - return val, nil - } - - val, err := c.netRdr(key) - if err != nil { - return nil, err - } - - c.cache.Add(key, val) - - return val, nil -} - // wrapper over TTL cache of values read from the network // that implements container storage. type ttlContainerStorage struct { @@ -165,14 +124,12 @@ type ttlContainerStorage struct { delInfoCache *ttlNetCache[cid.ID, *container.DelInfo] } -func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage { - const containerCacheSize = 100 - - lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) { - return v.Get(id) +func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { + lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) { + return v.Get(ctx, id) }, metrics.NewCacheMetrics("container")) - lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) { - return v.DeletionInfo(id) + lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + return v.DeletionInfo(ctx, id) }, metrics.NewCacheMetrics("container_deletion_info")) return ttlContainerStorage{ @@ -190,68 +147,245 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) { // Get returns container value from the cache. If value is missing in the cache // or expired, then it returns value from side chain and updates the cache. -func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) { - return s.containerCache.get(cnr) +func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) { + return s.containerCache.get(ctx, cnr) } -func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) { - return s.delInfoCache.get(cnr) -} - -type ttlEACLStorage struct { - *ttlNetCache[cid.ID, *container.EACL] -} - -func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) ttlEACLStorage { - const eaclCacheSize = 100 - - lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(id cid.ID) (*container.EACL, error) { - return v.GetEACL(id) - }, metrics.NewCacheMetrics("eacl")) - - return ttlEACLStorage{lruCnrCache} -} - -// GetEACL returns eACL value from the cache. If value is missing in the cache -// or expired, then it returns value from side chain and updates cache. -func (s ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) { - return s.get(cnr) -} - -// InvalidateEACL removes cached eACL value. 
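Aside: with Get and DeletionInfo now accepting a context, a caller's deadline finally reaches the side-chain read that happens behind a cache miss. A hypothetical call site, with the Source interface simplified to strings (the real one uses cid.ID and container.Container):

package main

import (
	"context"
	"fmt"
	"time"
)

// Source mirrors the new shape: every read takes a context.
type Source interface {
	Get(ctx context.Context, id string) (string, error)
}

type fake struct{}

func (fake) Get(_ context.Context, id string) (string, error) {
	return "container:" + id, nil
}

func fetchWithDeadline(src Source, id string) (string, error) {
	// The deadline now propagates into the side-chain client on a miss.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	return src.Get(ctx, id)
}

func main() {
	v, _ := fetchWithDeadline(fake{}, "abc")
	fmt.Println(v)
}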
-func (s ttlEACLStorage) InvalidateEACL(cnr cid.ID) { - s.remove(cnr) +func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) { + return s.delInfoCache.get(ctx, cnr) } type lruNetmapSource struct { netState netmap.State - cache *lruNetCache + client rawSource + cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]] + mtx sync.RWMutex + metrics cacheMetrics + log *logger.Logger + candidates atomic.Pointer[[]netmapSDK.NodeInfo] } -func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { +type rawSource interface { + GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error) + GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) +} + +func newCachedNetmapStorage(ctx context.Context, log *logger.Logger, + netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration, +) netmap.Source { const netmapCacheSize = 10 - lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) { - return v.GetNetMapByEpoch(key) - }, metrics.NewCacheMetrics("netmap")) + cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil) + fatalOnErr(err) - return &lruNetmapSource{ - netState: s, - cache: lruNetmapCache, + src := &lruNetmapSource{ + netState: netState, + client: client, + cache: cache, + log: log, + metrics: metrics.NewCacheMetrics("netmap"), + } + + wg.Add(1) + go func() { + defer wg.Done() + src.updateCandidates(ctx, d) + }() + + return src +} + +// updateCandidates routine to merge netmap in cache with candidates list. +func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) { + timer := time.NewTimer(d) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + newCandidates, err := s.client.GetCandidates(ctx) + if err != nil { + s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err)) + timer.Reset(d) + break + } + if len(newCandidates) == 0 { + s.candidates.Store(&newCandidates) + timer.Reset(d) + break + } + slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + return cmp.Compare(n1.Hash(), n2.Hash()) + }) + + // Check once state changed + v := s.candidates.Load() + if v == nil { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + timer.Reset(d) + break + } + ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) || + uint32(n1.Status()) != uint32(n2.Status()) || + slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 { + return 1 + } + ne1 := slices.Collect(n1.NetworkEndpoints()) + ne2 := slices.Collect(n2.NetworkEndpoints()) + return slices.Compare(ne1, ne2) + }) + if ret != 0 { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + } + timer.Reset(d) + } } } -func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { - return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff) +func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) { + s.mtx.Lock() + tmp := s.cache.Values() + s.mtx.Unlock() + for _, pointer := range tmp { + nm := pointer.Load() + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + nm = nm.Clone() + mergeNetmapWithCandidates(updates, nm) + pointer.Store(nm) + } + } } -func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, 
error) { - return s.getNetMapByEpoch(epoch) +// reads value by the key. +// +// updates the value from the network on cache miss. +// +// returned value should not be modified. +func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { + hit := false + startedAt := time.Now() + defer func() { + s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) + }() + + s.mtx.RLock() + val, ok := s.cache.Get(key) + s.mtx.RUnlock() + if ok { + hit = true + return val.Load(), nil + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + val, ok = s.cache.Get(key) + if ok { + hit = true + return val.Load(), nil + } + + nm, err := s.client.GetNetMapByEpoch(ctx, key) + if err != nil { + return nil, err + } + v := s.candidates.Load() + if v != nil { + updates := getNetMapNodesToUpdate(nm, *v) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + } + + p := atomic.Pointer[netmapSDK.NetMap]{} + p.Store(nm) + s.cache.Add(key, &p) + + return nm, nil } -func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { - val, err := s.cache.get(epoch) +// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates. +func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) { + for _, v := range updates { + if v.status != netmapSDK.UnspecifiedState { + nm.Nodes()[v.netmapIndex].SetStatus(v.status) + } + if v.externalAddresses != nil { + nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...) + } + if v.endpoints != nil { + nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...) + } + } +} + +type nodeToUpdate struct { + netmapIndex int + status netmapSDK.NodeState + externalAddresses []string + endpoints []string +} + +// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates. 
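Aside: the get method above is a textbook double-checked lookup: a read lock for the fast path, then a re-check under the write lock so at most one goroutine pays for the network fetch of a given epoch. The same shape in isolation (a plain map stands in for the LRU, and metrics are omitted):

package main

import (
	"fmt"
	"sync"
)

type lazyMap struct {
	mu   sync.RWMutex
	data map[int]string
}

func (m *lazyMap) get(k int, load func(int) string) string {
	m.mu.RLock()
	v, ok := m.data[k]
	m.mu.RUnlock()
	if ok {
		return v // fast path: shared lock only, readers never serialize
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	// Re-check: another goroutine may have loaded k while we waited.
	if v, ok := m.data[k]; ok {
		return v
	}
	v = load(k) // expensive network call happens at most once per key
	m.data[k] = v
	return v
}

func main() {
	m := &lazyMap{data: map[int]string{}}
	fmt.Println(m.get(1, func(k int) string { return fmt.Sprintf("netmap-%d", k) }))
	fmt.Println(m.get(1, func(k int) string { return "never called" }))
}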
+func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate { + var res []nodeToUpdate + for i := range nm.Nodes() { + for _, cnd := range candidates { + if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) { + var tmp nodeToUpdate + var update bool + + if cnd.Status() != nm.Nodes()[i].Status() && + (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) { + update = true + tmp.status = cnd.Status() + } + + externalAddresses := cnd.ExternalAddresses() + if externalAddresses != nil && + slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 { + update = true + tmp.externalAddresses = externalAddresses + } + + nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints()) + nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints()) + candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints()) + candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints()) + if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 { + update = true + tmp.endpoints = candidateEndpoints + } + + if update { + tmp.netmapIndex = i + res = append(res, tmp) + } + + break + } + } + } + return res +} + +func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { + return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff) +} + +func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + return s.getNetMapByEpoch(ctx, epoch) +} + +func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + val, err := s.get(ctx, epoch) if err != nil { return nil, err } @@ -259,7 +393,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, err return val, nil } -func (s *lruNetmapSource) Epoch() (uint64, error) { +func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { return s.netState.CurrentEpoch(), nil } @@ -267,7 +401,10 @@ type cachedIRFetcher struct { *ttlNetCache[struct{}, [][]byte] } -func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher { +func newCachedIRFetcher(f interface { + InnerRingKeys(ctx context.Context) ([][]byte, error) +}, +) cachedIRFetcher { const ( irFetcherCacheSize = 1 // we intend to store only one value @@ -281,8 +418,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached ) irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, - func(_ struct{}) ([][]byte, error) { - return f.InnerRingKeys() + func(ctx context.Context, _ struct{}) ([][]byte, error) { + return f.InnerRingKeys(ctx) }, metrics.NewCacheMetrics("ir_keys"), ) @@ -292,8 +429,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in // the cache or expired, then it returns keys from side chain and updates // the cache. 
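Aside: getNetMapNodesToUpdate above compares endpoint lists by first materializing the NetworkEndpoints iterators with slices.AppendSeq and then diffing the slices, which is the idiomatic Go 1.23 way to compare two iter.Seq streams. A self-contained sketch, with plain strings standing in for the SDK's endpoint iterator:

package main

import (
	"fmt"
	"iter"
	"slices"
)

// seq wraps a fixed list in an iter.Seq, mimicking NetworkEndpoints().
func seq(items ...string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, it := range items {
			if !yield(it) {
				return
			}
		}
	}
}

func main() {
	node := seq("10.0.0.1:8080", "10.0.0.2:8080")
	candidate := seq("10.0.0.1:8080", "10.0.0.3:8080")

	// Materialize both iterators, then compare element-wise.
	a := slices.AppendSeq(make([]string, 0, 2), node)
	b := slices.AppendSeq(make([]string, 0, 2), candidate)
	fmt.Println(slices.Compare(a, b) != 0) // true: endpoints changed
}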
-func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) { - val, err := f.get(struct{}{}) +func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { + val, err := f.get(ctx, struct{}{}) if err != nil { return nil, err } @@ -305,18 +442,18 @@ type ttlMaxObjectSizeCache struct { mtx sync.RWMutex lastUpdated time.Time lastSize uint64 - src putsvc.MaxSizeSource + src objectwriter.MaxSizeSource metrics cacheMetrics } -func newCachedMaxObjectSizeSource(src putsvc.MaxSizeSource) putsvc.MaxSizeSource { +func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.MaxSizeSource { return &ttlMaxObjectSizeCache{ src: src, metrics: metrics.NewCacheMetrics("max_object_size"), } } -func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 { +func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { const ttl = time.Second * 30 hit := false @@ -338,7 +475,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 { c.mtx.Lock() size = c.lastSize if !c.lastUpdated.After(prevUpdated) { - size = c.src.MaxObjectSize() + size = c.src.MaxObjectSize(ctx) c.lastSize = size c.lastUpdated = time.Now() } diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go index f8c324a2f..24286826f 100644 --- a/cmd/frostfs-node/cache_test.go +++ b/cmd/frostfs-node/cache_test.go @@ -1,10 +1,13 @@ package main import ( + "context" "errors" + "sync" "testing" "time" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/stretchr/testify/require" ) @@ -17,7 +20,7 @@ func TestTTLNetCache(t *testing.T) { t.Run("Test Add and Get", func(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.Equal(t, ti, val) }) @@ -26,7 +29,7 @@ func TestTTLNetCache(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) time.Sleep(2 * ttlDuration) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.NotEqual(t, val, ti) }) @@ -35,20 +38,20 @@ func TestTTLNetCache(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) cache.remove(key) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.NotEqual(t, val, ti) }) t.Run("Test Cache Error", func(t *testing.T) { cache.set("error", time.Now(), errors.New("mock error")) - _, err := cache.get("error") + _, err := cache.get(context.Background(), "error") require.Error(t, err) require.Equal(t, "mock error", err.Error()) }) } -func testNetValueReader(key string) (time.Time, error) { +func testNetValueReader(_ context.Context, key string) (time.Time, error) { if key == "error" { return time.Now(), errors.New("mock error") } @@ -58,3 +61,75 @@ func testNetValueReader(key string) (time.Time, error) { type noopCacheMetricts struct{} func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {} + +type rawSrc struct{} + +func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) { + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Online) + node0.SetExternalAddresses("1", "0") + node0.SetNetworkEndpoints("1", "0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + node1.SetStatus(netmapSDK.Online) + node1.SetExternalAddresses("1", "0") + node1.SetNetworkEndpoints("1", "0") + + return []netmapSDK.NodeInfo{node0, node1}, nil +} + +func (r *rawSrc) GetNetMapByEpoch(ctx 
context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm := netmapSDK.NetMap{} + nm.SetEpoch(1) + + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Maintenance) + node0.SetExternalAddresses("0") + node0.SetNetworkEndpoints("0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + node1.SetStatus(netmapSDK.Maintenance) + node1.SetExternalAddresses("0") + node1.SetNetworkEndpoints("0") + + nm.SetNodes([]netmapSDK.NodeInfo{node0, node1}) + + return &nm, nil +} + +type st struct{} + +func (s *st) CurrentEpoch() uint64 { + return 1 +} + +func TestNetmapStorage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + wg := sync.WaitGroup{} + cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50) + + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance) + require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1) + require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1) + + require.Eventually(t, func() bool { + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + for _, node := range nm.Nodes() { + if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 && + node.NumberOfNetworkEndpoints() == 2) { + return false + } + } + return true + }, time.Second*5, time.Millisecond*10) + + cancel() + wg.Wait() +} diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 5b91e7819..96274e625 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -15,7 +15,6 @@ import ( "syscall" "time" - netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/audit" @@ -26,18 +25,23 @@ import ( fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" + internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -56,6 +60,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone" tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" @@ -67,6 +72,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -102,14 +109,20 @@ type applicationConfiguration struct { LoggerCfg struct { level string destination string + timestamp bool + options []zap.Option + tags [][]string + } + + ObjectCfg struct { + tombstoneLifetime uint64 + priorityMetrics []placement.Metric } EngineCfg struct { errorThreshold uint32 - shardPoolSize uint32 shards []shardCfg lowMem bool - rebuildWorkers uint32 } // if need to run node in compatibility with other versions mode @@ -117,15 +130,13 @@ type applicationConfiguration struct { } type shardCfg struct { - compress bool - estimateCompressibility bool - estimateCompressibilityThreshold float64 + compression compression.Config smallSizeObjectLimit uint64 - uncompressableContentType []string refillMetabase bool refillMetabaseWorkersCount int mode shardmode.Mode + limiter qos.Limiter metaCfg struct { path string @@ -146,13 +157,12 @@ type shardCfg struct { writecacheCfg struct { enabled bool path string - maxBatchSize int - maxBatchDelay time.Duration - smallObjectSize uint64 maxObjSize uint64 flushWorkerCount int sizeLimit uint64 + countLimit uint64 noSync bool + flushSizeLimit uint64 } piloramaCfg struct { @@ -222,67 +232,88 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { a.LoggerCfg.level = loggerconfig.Level(c) a.LoggerCfg.destination = loggerconfig.Destination(c) + a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) + var opts []zap.Option + if loggerconfig.ToLokiConfig(c).Enabled { + opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { + lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c)) + return lokiCore + })} + } + a.LoggerCfg.options = opts + a.LoggerCfg.tags = loggerconfig.Tags(c) + + // Object + + a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) + locodeDBPath := nodeconfig.LocodeDBPath(c) + parser, err := placement.NewMetricsParser(locodeDBPath) + if err != nil { + return fmt.Errorf("metrics parser creation: %w", err) + } + m, err := parser.ParseMetrics(objectconfig.Get(c).Priority()) + if err != nil { + return fmt.Errorf("parse metrics: %w", err) + } + a.ObjectCfg.priorityMetrics 
= m // Storage Engine a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) - a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c) a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c) - a.EngineCfg.rebuildWorkers = engineconfig.EngineRebuildWorkersCount(c) return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) } -func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error { - var newConfig shardCfg +func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error { + var target shardCfg - newConfig.refillMetabase = oldConfig.RefillMetabase() - newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount() - newConfig.mode = oldConfig.Mode() - newConfig.compress = oldConfig.Compress() - newConfig.estimateCompressibility = oldConfig.EstimateCompressibility() - newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold() - newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes() - newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit() + target.refillMetabase = source.RefillMetabase() + target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() + target.mode = source.Mode() + target.compression = source.Compression() + target.smallSizeObjectLimit = source.SmallSizeLimit() - a.setShardWriteCacheConfig(&newConfig, oldConfig) + a.setShardWriteCacheConfig(&target, source) - a.setShardPiloramaConfig(c, &newConfig, oldConfig) + a.setShardPiloramaConfig(c, &target, source) - if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil { + if err := a.setShardStorageConfig(&target, source); err != nil { return err } - a.setMetabaseConfig(&newConfig, oldConfig) + a.setMetabaseConfig(&target, source) - a.setGCConfig(&newConfig, oldConfig) + a.setGCConfig(&target, source) + if err := a.setLimiter(&target, source); err != nil { + return err + } - a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig) + a.EngineCfg.shards = append(a.EngineCfg.shards, target) return nil } -func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - writeCacheCfg := oldConfig.WriteCache() +func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) { + writeCacheCfg := source.WriteCache() if writeCacheCfg.Enabled() { - wc := &newConfig.writecacheCfg + wc := &target.writecacheCfg wc.enabled = true wc.path = writeCacheCfg.Path() - wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize() - wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay() wc.maxObjSize = writeCacheCfg.MaxObjectSize() - wc.smallObjectSize = writeCacheCfg.SmallObjectSize() wc.flushWorkerCount = writeCacheCfg.WorkerCount() wc.sizeLimit = writeCacheCfg.SizeLimit() + wc.countLimit = writeCacheCfg.CountLimit() wc.noSync = writeCacheCfg.NoSync() + wc.flushSizeLimit = writeCacheCfg.MaxFlushingObjectsSize() } } -func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) { +func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) { if config.BoolSafe(c.Sub("tree"), "enabled") { - piloramaCfg := oldConfig.Pilorama() - pr := &newConfig.piloramaCfg + piloramaCfg := source.Pilorama() + pr := &target.piloramaCfg pr.enabled = true pr.path = piloramaCfg.Path() @@ -293,8 
+324,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newC } } -func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error { - blobStorCfg := oldConfig.BlobStor() +func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error { + blobStorCfg := source.BlobStor() storagesCfg := blobStorCfg.Storages() ss := make([]subStorageCfg, 0, len(storagesCfg)) @@ -328,13 +359,13 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol ss = append(ss, sCfg) } - newConfig.subStorages = ss + target.subStorages = ss return nil } -func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - metabaseCfg := oldConfig.Metabase() - m := &newConfig.metaCfg +func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) { + metabaseCfg := source.Metabase() + m := &target.metaCfg m.path = metabaseCfg.Path() m.perm = metabaseCfg.BoltDB().Perm() @@ -342,12 +373,22 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize() } -func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - gcCfg := oldConfig.GC() - newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() - newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() - newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() - newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() +func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) { + gcCfg := source.GC() + target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() + target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() + target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() + target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() +} + +func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error { + limitsConfig := source.Limits().ToConfig() + limiter, err := qos.NewLimiter(limitsConfig) + if err != nil { + return err + } + target.limiter = limiter + return nil } // internals contains application-specific internals that are created @@ -378,16 +419,16 @@ type internals struct { } // starts node's maintenance. -func (c *cfg) startMaintenance() { +func (c *cfg) startMaintenance(ctx context.Context) { c.isMaintenance.Store(true) c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE) - c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance) + c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance) } // stops node's maintenance. -func (c *internals) stopMaintenance() { +func (c *internals) stopMaintenance(ctx context.Context) { if c.isMaintenance.CompareAndSwap(true, false) { - c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance) + c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance) } } @@ -430,12 +471,13 @@ type shared struct { metricsCollector *metrics.NodeMetrics metricsSvc *objectService.MetricCollector + + dialerSource *internalNet.DialerSource } // dynamicConfiguration stores parameters of the // components that supports runtime reconfigurations. 
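Aside: note how stopMaintenance above logs only when CompareAndSwap observes a true-to-false transition, so racing control calls produce exactly one "stopped maintenance" line. The idiom in isolation:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var maintenance atomic.Bool
	maintenance.Store(true)

	stop := func() {
		// Only the caller that wins the swap logs the transition.
		if maintenance.CompareAndSwap(true, false) {
			fmt.Println("stopped local node's maintenance")
		}
	}
	stop() // logs once
	stop() // no-op: already false
}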
type dynamicConfiguration struct { - logger *logger.Prm pprof *httpComponent metrics *httpComponent } @@ -472,6 +514,7 @@ type cfg struct { cfgNetmap cfgNetmap cfgControlService cfgControlService cfgObject cfgObject + cfgQoSService cfgQoSService } // ReadCurrentNetMap reads network map which has been cached at the @@ -506,6 +549,8 @@ type cfgGRPC struct { maxChunkSize uint64 maxAddrAmount uint64 reconnectTimeout time.Duration + + limiter atomic.Pointer[limiting.SemaphoreLimiter] } func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) { @@ -565,13 +610,16 @@ func (c *cfgGRPC) dropConnection(endpoint string) { } type cfgMorph struct { - client *client.Client + initialized bool + guard sync.Mutex - notaryEnabled bool + client *client.Client // TTL of Sidechain cached values. Non-positive value disables caching. cacheTTL time.Duration + containerCacheSize uint32 + proxyScriptHash neogoutil.Uint160 } @@ -582,9 +630,10 @@ type cfgAccounting struct { type cfgContainer struct { scriptHash neogoutil.Uint160 - parsers map[event.Type]event.NotificationParser - subscribers map[event.Type][]event.Handler - workerPool util.WorkerPool // pool for asynchronous handlers + parsers map[event.Type]event.NotificationParser + subscribers map[event.Type][]event.Handler + workerPool util.WorkerPool // pool for asynchronous handlers + containerBatchSize uint32 } type cfgFrostfsID struct { @@ -602,9 +651,7 @@ type cfgNetmap struct { state *networkState - needBootstrap bool reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime - startEpoch uint64 // epoch number when application is started } type cfgNodeInfo struct { @@ -617,15 +664,13 @@ type cfgObject struct { cnrSource container.Source - eaclSource container.EACLSource - cfgAccessPolicyEngine cfgAccessPolicyEngine pool cfgObjectRoutines cfgLocalStorage cfgLocalStorage - tombstoneLifetime uint64 + tombstoneLifetime *atomic.Uint64 skipSessionTokenIssuerVerification bool } @@ -641,10 +686,6 @@ type cfgAccessPolicyEngine struct { } type cfgObjectRoutines struct { - putRemote *ants.Pool - - putLocal *ants.Pool - replication *ants.Pool } @@ -668,11 +709,9 @@ func initCfg(appCfg *config.Config) *cfg { key := nodeconfig.Key(appCfg) - relayOnly := nodeconfig.Relay(appCfg) - netState := newNetworkState() - c.shared = initShared(appCfg, key, netState, relayOnly) + c.shared = initShared(appCfg, key, netState) netState.metrics = c.metricsCollector @@ -681,12 +720,7 @@ func initCfg(appCfg *config.Config) *cfg { logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) - if loggerconfig.ToLokiConfig(appCfg).Enabled { - log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { - lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg)) - return lokiCore - })) - } + logger.UpdateLevelForTags(logPrm) c.internals = initInternals(appCfg, log) @@ -697,7 +731,7 @@ func initCfg(appCfg *config.Config) *cfg { c.cfgFrostfsID = initFrostfsID(appCfg) - c.cfgNetmap = initNetmap(appCfg, netState, relayOnly) + c.cfgNetmap = initNetmap(appCfg, netState) c.cfgGRPC = initCfgGRPC() @@ -743,22 +777,24 @@ func initSdNotify(appCfg *config.Config) bool { return false } -func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared { - var netAddr network.AddressGroup - - if !relayOnly { - netAddr = nodeconfig.BootstrapAddresses(appCfg) - } +func initShared(appCfg *config.Config, key *keys.PrivateKey, netState 
*networkState) shared { + netAddr := nodeconfig.BootstrapAddresses(appCfg) persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) fatalOnErr(err) + nodeMetrics := metrics.NewNodeMetrics() + + ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg, nodeMetrics.MultinetMetrics())) + fatalOnErr(err) + cacheOpts := cache.ClientCacheOpts{ DialTimeout: apiclientconfig.DialTimeout(appCfg), StreamTimeout: apiclientconfig.StreamTimeout(appCfg), Key: &key.PrivateKey, AllowExternal: apiclientconfig.AllowExternal(appCfg), ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), + DialerSource: ds, } return shared{ @@ -770,22 +806,38 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt bgClientCache: cache.NewSDKClientCache(cacheOpts), putClientCache: cache.NewSDKClientCache(cacheOpts), persistate: persistate, - metricsCollector: metrics.NewNodeMetrics(), + metricsCollector: nodeMetrics, + dialerSource: ds, } } -func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap { +func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) internalNet.Config { + result := internalNet.Config{ + Enabled: multinet.Enabled(appCfg), + Balancer: multinet.Balancer(appCfg), + Restrict: multinet.Restrict(appCfg), + FallbackDelay: multinet.FallbackDelay(appCfg), + Metrics: m, + } + sn := multinet.Subnets(appCfg) + for _, s := range sn { + result.Subnets = append(result.Subnets, internalNet.Subnet{ + Prefix: s.Mask, + SourceIPs: s.SourceIPs, + }) + } + return result +} + +func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap { netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) fatalOnErr(err) - var reBootstrapTurnedOff atomic.Bool - reBootstrapTurnedOff.Store(relayOnly) return cfgNetmap{ scriptHash: contractsconfig.Netmap(appCfg), state: netState, workerPool: netmapWorkerPool, - needBootstrap: !relayOnly, - reBoostrapTurnedOff: &reBootstrapTurnedOff, + reBoostrapTurnedOff: &atomic.Bool{}, } } @@ -805,20 +857,22 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID { } } -func initCfgGRPC() cfgGRPC { - maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload - maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes +func initCfgGRPC() (cfg cfgGRPC) { + maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload + maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes - return cfgGRPC{ - maxChunkSize: maxChunkSize, - maxAddrAmount: maxAddrAmount, - } + cfg.maxChunkSize = maxChunkSize + cfg.maxAddrAmount = maxAddrAmount + + return } func initCfgObject(appCfg *config.Config) cfgObject { + var tsLifetime atomic.Uint64 + tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg)) return cfgObject{ pool: initObjectPool(appCfg), - tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg), + tombstoneLifetime: &tsLifetime, skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(), } } @@ -827,11 +881,9 @@ func (c *cfg) engineOpts() []engine.Option { var opts []engine.Option opts = append(opts, - engine.WithShardPoolSize(c.EngineCfg.shardPoolSize), engine.WithErrorThreshold(c.EngineCfg.errorThreshold), - engine.WithLogger(c.log), + engine.WithLogger(c.log.WithTag(logger.TagEngine)), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), - engine.WithRebuildWorkersCount(c.EngineCfg.rebuildWorkers), ) if c.metricsCollector != nil { @@ -861,14 +913,14 @@ func 
(c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option { if wcRead := shCfg.writecacheCfg; wcRead.enabled { writeCacheOpts = append(writeCacheOpts, writecache.WithPath(wcRead.path), - writecache.WithMaxBatchSize(wcRead.maxBatchSize), - writecache.WithMaxBatchDelay(wcRead.maxBatchDelay), + writecache.WithFlushSizeLimit(wcRead.flushSizeLimit), writecache.WithMaxObjectSize(wcRead.maxObjSize), - writecache.WithSmallObjectSize(wcRead.smallObjectSize), writecache.WithFlushWorkersCount(wcRead.flushWorkerCount), writecache.WithMaxCacheSize(wcRead.sizeLimit), + writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithNoSync(wcRead.noSync), - writecache.WithLogger(c.log), + writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)), + writecache.WithQoSLimiter(shCfg.limiter), ) } return writeCacheOpts @@ -907,7 +959,8 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval), blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount), blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout), - blobovniczatree.WithLogger(c.log), + blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)), + blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)), blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit), } @@ -930,7 +983,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. fstree.WithPerm(sRead.perm), fstree.WithDepth(sRead.depth), fstree.WithNoSync(sRead.noSync), - fstree.WithLogger(c.log), + fstree.WithLogger(c.log.WithTag(logger.TagFSTree)), } if c.metricsCollector != nil { fstreeOpts = append(fstreeOpts, @@ -960,12 +1013,9 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID ss := c.getSubstorageOpts(ctx, shCfg) blobstoreOpts := []blobstor.Option{ - blobstor.WithCompressObjects(shCfg.compress), - blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType), - blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility), - blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold), + blobstor.WithCompression(shCfg.compression), blobstor.WithStorages(ss), - blobstor.WithLogger(c.log), + blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)), } if c.metricsCollector != nil { blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore()))) @@ -984,12 +1034,13 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID } if c.metricsCollector != nil { mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics()))) + shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics()) } var sh shardOptsWithID sh.configID = shCfg.id() sh.shOpts = []shard.Option{ - shard.WithLogger(c.log), + shard.WithLogger(c.log.WithTag(logger.TagShard)), shard.WithRefillMetabase(shCfg.refillMetabase), shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount), shard.WithMode(shCfg.mode), @@ -1008,29 +1059,33 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID return pool }), + shard.WithLimiter(shCfg.limiter), } return sh } -func (c *cfg) loggerPrm() (*logger.Prm, error) { - // check if it has been inited before - if c.dynamicConfiguration.logger == nil { - c.dynamicConfiguration.logger = new(logger.Prm) - } - +func (c *cfg) loggerPrm() (logger.Prm, error) { + 
var prm logger.Prm // (re)init read configuration - err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level) + err := prm.SetLevelString(c.LoggerCfg.level) if err != nil { // not expected since validation should be performed before - panic("incorrect log level format: " + c.LoggerCfg.level) + return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level) } - err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination) + err = prm.SetDestination(c.LoggerCfg.destination) if err != nil { // not expected since validation should be performed before - panic("incorrect log destination format: " + c.LoggerCfg.destination) + return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination) + } + prm.PrependTimestamp = c.LoggerCfg.timestamp + prm.Options = c.LoggerCfg.options + err = prm.SetTags(c.LoggerCfg.tags) + if err != nil { + // not expected since validation should be performed before + return logger.Prm{}, errors.New("incorrect allowed tags format") } - return c.dynamicConfiguration.logger, nil + return prm, nil } func (c *cfg) LocalAddress() network.AddressGroup { @@ -1040,7 +1095,7 @@ func (c *cfg) LocalAddress() network.AddressGroup { func initLocalStorage(ctx context.Context, c *cfg) { ls := engine.New(c.engineOpts()...) - addNewEpochAsyncNotificationHandler(c, func(ev event.Event) { + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber()) }) @@ -1050,12 +1105,14 @@ func initLocalStorage(ctx context.Context, c *cfg) { var shardsAttached int for _, optsWithMeta := range c.shardOpts(ctx) { - id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...) + id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, + shard.WithTombstoneSource(c.createTombstoneSource()), + shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
if err != nil { - c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) } else { shardsAttached++ - c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) + c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) } } if shardsAttached == 0 { @@ -1065,27 +1122,26 @@ func initLocalStorage(ctx context.Context, c *cfg) { c.cfgObject.cfgLocalStorage.localStorage = ls c.onShutdown(func() { - c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine) + c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine) - err := ls.Close(context.Background()) + err := ls.Close(context.WithoutCancel(ctx)) if err != nil { - c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure, - zap.String("error", err.Error()), + c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure, + zap.Error(err), ) } else { - c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) + c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) } }) } -func initAccessPolicyEngine(_ context.Context, c *cfg) { +func initAccessPolicyEngine(ctx context.Context, c *cfg) { var localOverrideDB chainbase.LocalOverrideDatabase if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" { - c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) + c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase() } else { localOverrideDB = chainbase.NewBoltLocalOverrideDatabase( - chainbase.WithLogger(c.log), chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()), chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()), chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()), @@ -1098,7 +1154,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) { c.cfgObject.cfgAccessPolicyEngine.policyContractHash) cacheSize := morphconfig.APEChainCacheSize(c.appCfg) - if cacheSize > 0 { + if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) } @@ -1107,7 +1163,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) { c.onShutdown(func() { if err := ape.LocalOverrideDatabaseCore().Close(); err != nil { - c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure, + c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure, zap.Error(err), ) } @@ -1117,38 +1173,22 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) { func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { var err error - optNonBlocking := ants.WithNonblocking(true) - - putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote() - pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking) - fatalOnErr(err) - - putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal() - pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking) - fatalOnErr(err) - replicatorPoolSize := replicatorconfig.PoolSize(cfg) - if replicatorPoolSize <= 0 { - replicatorPoolSize = putRemoteCapacity - } - pool.replication, err = ants.NewPool(replicatorPoolSize) fatalOnErr(err) return pool } -func (c *cfg) LocalNodeInfo() (*netmapV2.NodeInfo, error) { - var res netmapV2.NodeInfo - +func (c *cfg) LocalNodeInfo() *netmap.NodeInfo { + var res netmap.NodeInfo ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - ni.WriteToV2(&res) + res = ni } 
else { - c.cfgNodeInfo.localInfo.WriteToV2(&res) + res = c.cfgNodeInfo.localInfo } - - return &res, nil + return &res } // setContractNodeInfo rewrites local node info from the FrostFS network map. @@ -1158,12 +1198,12 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) { c.cfgNetmap.state.setNodeInfo(ni) } -func (c *cfg) updateContractNodeInfo(epoch uint64) { - ni, err := c.netmapLocalNodeState(epoch) +func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { + ni, err := c.netmapLocalNodeState(ctx, epoch) if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, + c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -1173,42 +1213,37 @@ func (c *cfg) updateContractNodeInfo(epoch uint64) { // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract // with the binary-encoded information from the current node's configuration. // The state is set using the provided setter which MUST NOT be nil. -func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error { +func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error { ni := c.cfgNodeInfo.localInfo - stateSetter(&ni) + ni.SetStatus(state) prm := nmClient.AddPeerPrm{} prm.SetNodeInfo(ni) - return c.cfgNetmap.wrapper.AddPeer(prm) + return c.cfgNetmap.wrapper.AddPeer(ctx, prm) } // bootstrapOnline calls cfg.bootstrapWithState with "online" state. -func bootstrapOnline(c *cfg) error { - return c.bootstrapWithState((*netmap.NodeInfo).SetOnline) +func bootstrapOnline(ctx context.Context, c *cfg) error { + return c.bootstrapWithState(ctx, netmap.Online) } // bootstrap calls bootstrapWithState with: // - "maintenance" state if maintenance is in progress on the current node // - "online", otherwise -func (c *cfg) bootstrap() error { +func (c *cfg) bootstrap(ctx context.Context) error { // switch to online except when under maintenance st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { - c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState) - return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance) + c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState) + return c.bootstrapWithState(ctx, netmap.Maintenance) } - c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState, + c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState, zap.Stringer("previous", st), ) - return bootstrapOnline(c) -} - -// needBootstrap checks if local node should be registered in network on bootup. 
-func (c *cfg) needBootstrap() bool { - return c.cfgNetmap.needBootstrap + return bootstrapOnline(ctx, c) } type dCmp struct { @@ -1228,19 +1263,19 @@ func (c *cfg) signalWatcher(ctx context.Context) { // signals causing application to shut down should have priority over // reconfiguration signal case <-ch: - c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - c.shutdown() + c.shutdown(ctx) - c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-c.internalErr: // internal application error - c.log.Warn(logs.FrostFSNodeInternalApplicationError, + c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) - c.shutdown() + c.shutdown(ctx) - c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return default: // block until any signal is received @@ -1248,19 +1283,19 @@ func (c *cfg) signalWatcher(ctx context.Context) { case <-sighupCh: c.reloadConfig(ctx) case <-ch: - c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - c.shutdown() + c.shutdown(ctx) - c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-c.internalErr: // internal application error - c.log.Warn(logs.FrostFSNodeInternalApplicationError, + c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) - c.shutdown() + c.shutdown(ctx) - c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return } } @@ -1268,35 +1303,74 @@ } func (c *cfg) reloadConfig(ctx context.Context) { - c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) + c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) - if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { - c.log.Info(logs.FrostFSNodeSIGHUPSkip) + if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { + c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip) return } - defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) + defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) err := c.reloadAppConfig() if err != nil { - c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) return } // all the components are expected to support // Logger's dynamic reconfiguration approach - var components []dCmp - // Logger + components := c.getComponents(ctx) - logPrm, err := c.loggerPrm() + // Object + c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) + + // Storage Engine + + var rcfg engine.ReConfiguration + for _, optsWithID := range c.shardOpts(ctx) { + rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, + shard.WithTombstoneSource(c.createTombstoneSource()), + shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)), + )) + } + + err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg) if err != nil {
- c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err)) return } - components = append(components, dCmp{"logger", logPrm.Reload}) + for _, component := range components { + err = component.reloadFunc() + if err != nil { + c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying, + zap.String("component", component.name), + zap.Error(err)) + } + } + + if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil { + c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err)) + return + } + + c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) +} + +func (c *cfg) getComponents(ctx context.Context) []dCmp { + var components []dCmp + + components = append(components, dCmp{"logger", func() error { + prm, err := c.loggerPrm() + if err != nil { + return err + } + logger.UpdateLevelForTags(prm) + return nil + }}) components = append(components, dCmp{"runtime", func() error { - setRuntimeParameters(c) + setRuntimeParameters(ctx, c) return nil }}) components = append(components, dCmp{"audit", func() error { @@ -1305,12 +1379,22 @@ func (c *cfg) reloadConfig(ctx context.Context) { }}) components = append(components, dCmp{"pools", c.reloadPools}) components = append(components, dCmp{"tracing", func() error { - updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg)) + traceConfig, err := tracingconfig.ToTracingConfig(c.appCfg) + if err != nil { + return err + } + updated, err := tracing.Setup(ctx, *traceConfig) if updated { - c.log.Info(logs.FrostFSNodeTracingConfigationUpdated) + c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated) } return err }}) + if c.treeService != nil { + components = append(components, dCmp{"tree", func() error { + c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys()) + return nil + }}) + } if cmp, updated := metricsComponent(c); updated { if cmp.enabled { cmp.preReload = enableMetricsSvc @@ -1323,39 +1407,13 @@ func (c *cfg) reloadConfig(ctx context.Context) { components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }}) } - // Storage Engine + components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }}) - var rcfg engine.ReConfiguration - for _, optsWithID := range c.shardOpts(ctx) { - rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))) - } - - err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg) - if err != nil { - c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err)) - return - } - - for _, component := range components { - err = component.reloadFunc() - if err != nil { - c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying, - zap.String("component", component.name), - zap.Error(err)) - } - } - - c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) + return components } func (c *cfg) reloadPools() error { - newSize := objectconfig.Put(c.appCfg).PoolSizeLocal() - c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size") - - newSize = objectconfig.Put(c.appCfg).PoolSizeRemote() - c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size") - - newSize = replicatorconfig.PoolSize(c.appCfg) + newSize := replicatorconfig.PoolSize(c.appCfg) c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") 
return nil @@ -1364,7 +1422,7 @@ func (c *cfg) reloadPools() error { func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) { oldSize := p.Cap() if oldSize != newSize { - c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name), + c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name), zap.Int("old", oldSize), zap.Int("new", newSize)) p.Tune(newSize) } @@ -1389,14 +1447,25 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker { return tombstoneSource } -func (c *cfg) shutdown() { - old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN) +func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider { + return container.NewInfoProvider(func() (container.Source, error) { + c.initMorphComponents(ctx) + cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) + if err != nil { + return nil, err + } + return containerClient.AsContainerSource(cc), nil + }) +} + +func (c *cfg) shutdown(ctx context.Context) { + old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN) if old == control.HealthStatus_SHUTTING_DOWN { - c.log.Info(logs.FrostFSNodeShutdownSkip) + c.log.Info(ctx, logs.FrostFSNodeShutdownSkip) return } if old == control.HealthStatus_STARTING { - c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady) + c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady) } c.ctxCancel() @@ -1404,4 +1473,8 @@ func (c *cfg) shutdown() { for i := range c.closers { c.closers[len(c.closers)-1-i].fn() } + + if err := sdnotify.ClearStatus(); err != nil { + c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) + } } diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go index 36e53ea7c..c40bf3620 100644 --- a/cmd/frostfs-node/config/calls.go +++ b/cmd/frostfs-node/config/calls.go @@ -1,6 +1,7 @@ package config import ( + "slices" "strings" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" @@ -52,6 +53,5 @@ func (x *Config) Value(name string) any { // It supports only one level of nesting and is intended to be used // to provide default values. 
func (x *Config) SetDefault(from *Config) { - x.defaultPath = make([]string, len(from.path)) - copy(x.defaultPath, from.path) + x.defaultPath = slices.Clone(from.path) } diff --git a/cmd/frostfs-node/config/calls_test.go b/cmd/frostfs-node/config/calls_test.go index 68bf1c679..bc149eb7d 100644 --- a/cmd/frostfs-node/config/calls_test.go +++ b/cmd/frostfs-node/config/calls_test.go @@ -1,7 +1,6 @@ package config_test import ( - "os" "strings" "testing" @@ -38,8 +37,7 @@ func TestConfigEnv(t *testing.T) { envName := strings.ToUpper( strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator)) - err := os.Setenv(envName, value) - require.NoError(t, err) + t.Setenv(envName, value) c := configtest.EmptyConfig() diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go index 35dae97d9..ee9d4268b 100644 --- a/cmd/frostfs-node/config/configdir_test.go +++ b/cmd/frostfs-node/config/configdir_test.go @@ -12,13 +12,10 @@ import ( func TestConfigDir(t *testing.T) { dir := t.TempDir() - cfgFileName0 := path.Join(dir, "cfg_00.json") - cfgFileName1 := path.Join(dir, "cfg_01.yml") + cfgFileName := path.Join(dir, "cfg_01.yml") - require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777)) - require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777)) + require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777)) c := New("", dir, "") require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) - require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size"))) } diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go new file mode 100644 index 000000000..1cd64a6f8 --- /dev/null +++ b/cmd/frostfs-node/config/container/container.go @@ -0,0 +1,27 @@ +package containerconfig + +import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + +const ( + subsection = "container" + listStreamSubsection = "list_stream" + + // ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once. + ContainerBatchSizeDefault = 1000 +) + +// ContainerBatchSize returns the value of "batch_size" config parameter +// from "list_stream" subsection of "container" section. +// +// Returns ContainerBatchSizeDefault if the value is missing or if +// the value is not a positive integer.
+func ContainerBatchSize(c *config.Config) uint32 { + if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil { + return ContainerBatchSizeDefault + } + size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size") + if size == 0 { + return ContainerBatchSizeDefault + } + return size +} diff --git a/cmd/frostfs-node/config/container/container_test.go b/cmd/frostfs-node/config/container/container_test.go new file mode 100644 index 000000000..744cd3295 --- /dev/null +++ b/cmd/frostfs-node/config/container/container_test.go @@ -0,0 +1,27 @@ +package containerconfig_test + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestContainerSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + empty := configtest.EmptyConfig() + require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty)) + }) + + const path = "../../../../config/example/node" + fileConfigTest := func(c *config.Config) { + require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c)) + } + + configtest.ForEachFileType(path, fileConfigTest) + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go index baa4e3c9d..7994e7809 100644 --- a/cmd/frostfs-node/config/engine/config.go +++ b/cmd/frostfs-node/config/engine/config.go @@ -11,13 +11,6 @@ import ( const ( subsection = "storage" - - // ShardPoolSizeDefault is a default value of routine pool size per-shard to - // process object PUT operations in a storage engine. - ShardPoolSizeDefault = 20 - // RebuildWorkersCountDefault is a default value of the workers count to - // process storage rebuild operations in a storage engine. - RebuildWorkersCountDefault = 100 ) // ErrNoShardConfigured is returned when at least 1 shard is required but none are found. @@ -44,6 +37,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) c.Sub(si), ) + if sc.Mode() == mode.Disabled { + continue + } + // Path for the blobstor can't be present in the default section, because different shards // must have different paths, so if it is missing, the shard is not here. // At the same time checking for "blobstor" section doesn't work proper @@ -53,10 +50,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) } (*config.Config)(sc).SetDefault(def) - if sc.Mode() == mode.Disabled { - continue - } - if err := f(sc); err != nil { return err } @@ -68,18 +61,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) return nil } -// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section. -// -// Returns ShardPoolSizeDefault if the value is not a positive number. -func ShardPoolSize(c *config.Config) uint32 { - v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size") - if v > 0 { - return v - } - - return ShardPoolSizeDefault -} - // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section. // // Returns 0 if the value is missing.
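For reference, the ContainerBatchSize helper above reads a nested section of the node config; a minimal YAML sketch (the 500 value mirrors config/example/node, as asserted by the container test):

    container:
      list_stream:
        batch_size: 500
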
@@ -91,11 +72,3 @@ func ShardErrorThreshold(c *config.Config) uint32 { func EngineLowMemoryConsumption(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "low_mem") } - -// EngineRebuildWorkersCount returns value of "rebuild_workers_count" config parmeter from "storage" section. -func EngineRebuildWorkersCount(c *config.Config) uint32 { - if v := config.Uint32Safe(c.Sub(subsection), "rebuild_workers_count"); v > 0 { - return v - } - return RebuildWorkersCountDefault -} diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index 7473afefb..401c54edc 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -12,11 +12,30 @@ import ( fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" + writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "github.com/stretchr/testify/require" ) +func TestIterateShards(t *testing.T) { + fileConfigTest := func(c *config.Config) { + var res []string + require.NoError(t, + engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { + res = append(res, sc.Metabase().Path()) + return nil + })) + require.Equal(t, []string{"abc", "xyz"}, res) + } + + const cfgDir = "./testdata/shards" + configtest.ForEachFileType(cfgDir, fileConfigTest) + configtest.ForEnvFileType(t, cfgDir, fileConfigTest) +} + func TestEngineSection(t *testing.T) { t.Run("defaults", func(t *testing.T) { empty := configtest.EmptyConfig() @@ -36,9 +55,7 @@ func TestEngineSection(t *testing.T) { require.False(t, handlerCalled) require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) - require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty)) require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) - require.EqualValues(t, engineconfig.RebuildWorkersCountDefault, engineconfig.EngineRebuildWorkersCount(empty)) }) const path = "../../../../config/example/node" @@ -47,8 +64,6 @@ func TestEngineSection(t *testing.T) { num := 0 require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) - require.EqualValues(t, 15, engineconfig.ShardPoolSize(c)) - require.EqualValues(t, uint32(1000), engineconfig.EngineRebuildWorkersCount(c)) err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { defer func() { @@ -61,6 +76,7 @@ func TestEngineSection(t *testing.T) { ss := blob.Storages() pl := sc.Pilorama() gc := sc.GC() + limits := sc.Limits() switch num { case 0: @@ -74,20 +90,22 @@ func TestEngineSection(t *testing.T) { require.Equal(t, true, wc.NoSync()) require.Equal(t, "tmp/0/cache", wc.Path()) - require.EqualValues(t, 16384, wc.SmallObjectSize()) require.EqualValues(t, 134217728, wc.MaxObjectSize()) require.EqualValues(t, 30, wc.WorkerCount()) require.EqualValues(t, 3221225472, wc.SizeLimit()) + require.EqualValues(t, 49, wc.CountLimit()) + require.EqualValues(t, 
uint64(100), wc.MaxFlushingObjectsSize()) require.Equal(t, "tmp/0/meta", meta.Path()) require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm()) require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, true, sc.Compress()) - require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes()) - require.Equal(t, true, sc.EstimateCompressibility()) - require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold()) + require.Equal(t, true, sc.Compression().Enabled) + require.Equal(t, compression.LevelFastest, sc.Compression().Level) + require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) + require.Equal(t, true, sc.Compression().EstimateCompressibility) + require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -118,6 +136,86 @@ func TestEngineSection(t *testing.T) { require.Equal(t, false, sc.RefillMetabase()) require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write + require.Equal(t, 30*time.Second, readLimits.IdleTimeout) + require.Equal(t, int64(10_000), readLimits.MaxRunningOps) + require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) + require.Equal(t, 45*time.Second, writeLimits.IdleTimeout) + require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) + require.Equal(t, int64(100), writeLimits.MaxWaitingOps) + require.ElementsMatch(t, readLimits.Tags, + []qos.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(20), + ReservedOps: toPtr(1000), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(70), + ReservedOps: toPtr(10000), + }, + { + Tag: "background", + Weight: toPtr(5), + LimitOps: toPtr(10000), + ReservedOps: toPtr(0), + }, + { + Tag: "writecache", + Weight: toPtr(5), + LimitOps: toPtr(25000), + }, + { + Tag: "policer", + Weight: toPtr(5), + LimitOps: toPtr(25000), + Prohibited: true, + }, + { + Tag: "treesync", + Weight: toPtr(5), + LimitOps: toPtr(25), + }, + }) + require.ElementsMatch(t, writeLimits.Tags, + []qos.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(200), + ReservedOps: toPtr(100), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(700), + ReservedOps: toPtr(1000), + }, + { + Tag: "background", + Weight: toPtr(50), + LimitOps: toPtr(1000), + ReservedOps: toPtr(0), + }, + { + Tag: "writecache", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + { + Tag: "policer", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + { + Tag: "treesync", + Weight: toPtr(50), + LimitOps: toPtr(100), + }, + }) case 1: require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) require.Equal(t, fs.FileMode(0o644), pl.Perm()) @@ -129,18 +227,20 @@ func TestEngineSection(t *testing.T) { require.Equal(t, false, wc.NoSync()) require.Equal(t, "tmp/1/cache", wc.Path()) - require.EqualValues(t, 16384, wc.SmallObjectSize()) require.EqualValues(t, 134217728, wc.MaxObjectSize()) require.EqualValues(t, 30, wc.WorkerCount()) require.EqualValues(t, 4294967296, wc.SizeLimit()) + require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit()) + require.EqualValues(t, writecacheconfig.MaxFlushingObjectsSizeDefault, wc.MaxFlushingObjectsSize()) require.Equal(t, "tmp/1/meta", meta.Path()) require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm()) require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) 
require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, false, sc.Compress()) - require.Equal(t, []string(nil), sc.UncompressableContentTypes()) + require.Equal(t, false, sc.Compression().Enabled) + require.Equal(t, compression.LevelDefault, sc.Compression().Level) + require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -171,6 +271,17 @@ func TestEngineSection(t *testing.T) { require.Equal(t, true, sc.RefillMetabase()) require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write + require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps) + require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps) + require.Equal(t, 0, len(readLimits.Tags)) + require.Equal(t, 0, len(writeLimits.Tags)) } return nil }) @@ -184,3 +295,7 @@ func TestEngineSection(t *testing.T) { configtest.ForEnvFileType(t, path, fileConfigTest) }) } + +func toPtr(v float64) *float64 { + return &v +} diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go index 9e334cd8f..b564d36f8 100644 --- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go +++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go @@ -37,10 +37,7 @@ func (x *Config) Perm() fs.FileMode { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d < 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -48,10 +45,7 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s < 0 { - s = 0 - } - return s + return max(s, 0) } // NoSync returns the value of "no_sync" config parameter. @@ -60,3 +54,11 @@ func (x *Config) MaxBatchSize() int { func (x *Config) NoSync() bool { return config.BoolSafe((*config.Config)(x), "no_sync") } + +// PageSize returns the value of "page_size" config parameter. +// +// Returns 0 if the value is not a positive number. 
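+//
+// Example (a sketch; the 4096 value is hypothetical, any positive size works):
+//
+//	page_size: 4096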
+func (x *Config) PageSize() int { + s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size")) + return max(s, 0) +} diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index 0620c9f63..d42646da7 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -4,9 +4,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" + limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -26,42 +28,27 @@ func From(c *config.Config) *Config { return (*Config)(c) } -// Compress returns the value of "compress" config parameter. -// -// Returns false if the value is not a valid bool. -func (x *Config) Compress() bool { - return config.BoolSafe( - (*config.Config)(x), - "compress", - ) -} - -// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter. -// -// Returns nil if a the value is missing or is invalid. -func (x *Config) UncompressableContentTypes() []string { - return config.StringSliceSafe( - (*config.Config)(x), - "compression_exclude_content_types") -} - -// EstimateCompressibility returns the value of "estimate_compressibility" config parameter. -// -// Returns false if the value is not a valid bool. -func (x *Config) EstimateCompressibility() bool { - return config.BoolSafe( - (*config.Config)(x), - "compression_estimate_compressibility", - ) +func (x *Config) Compression() compression.Config { + cc := (*config.Config)(x).Sub("compression") + if cc == nil { + return compression.Config{} + } + return compression.Config{ + Enabled: config.BoolSafe(cc, "enabled"), + UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"), + Level: compression.Level(config.StringSafe(cc, "level")), + EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"), + EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc), + } } // EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter. // // Returns EstimateCompressibilityThresholdDefault if the value is not defined, not valid float or not in range [0.0; 1.0]. -func (x *Config) EstimateCompressibilityThreshold() float64 { +func estimateCompressibilityThreshold(c *config.Config) float64 { v := config.FloatOrDefault( - (*config.Config)(x), - "compression_estimate_compressibility_threshold", + c, + "estimate_compressibility_threshold", EstimateCompressibilityThresholdDefault) if v < 0.0 || v > 1.0 { return EstimateCompressibilityThresholdDefault @@ -125,6 +112,14 @@ func (x *Config) GC() *gcconfig.Config { ) } +// Limits returns "limits" subsection as a limitsconfig.Config. 
+func (x *Config) Limits() *limitsconfig.Config { + return limitsconfig.From( + (*config.Config)(x). + Sub("limits"), + ) +} + // RefillMetabase returns the value of "resync_metabase" config parameter. // // Returns false if the value is not a valid bool. diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go new file mode 100644 index 000000000..ccd1e0000 --- /dev/null +++ b/cmd/frostfs-node/config/engine/shard/limits/config.go @@ -0,0 +1,112 @@ +package limits + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "github.com/spf13/cast" +) + +// From wraps config section into Config. +func From(c *config.Config) *Config { + return (*Config)(c) +} + +// Config is a wrapper over the config section +// which provides access to Shard's limits configurations. +type Config config.Config + +func (x *Config) ToConfig() qos.LimiterConfig { + result := qos.LimiterConfig{ + Read: x.read(), + Write: x.write(), + } + panicOnErr(result.Validate()) + return result +} + +func (x *Config) read() qos.OpConfig { + return x.parse("read") +} + +func (x *Config) write() qos.OpConfig { + return x.parse("write") +} + +func (x *Config) parse(sub string) qos.OpConfig { + c := (*config.Config)(x).Sub(sub) + var result qos.OpConfig + + if s := config.Int(c, "max_waiting_ops"); s > 0 { + result.MaxWaitingOps = s + } else { + result.MaxWaitingOps = qos.NoLimit + } + + if s := config.Int(c, "max_running_ops"); s > 0 { + result.MaxRunningOps = s + } else { + result.MaxRunningOps = qos.NoLimit + } + + if s := config.DurationSafe(c, "idle_timeout"); s > 0 { + result.IdleTimeout = s + } else { + result.IdleTimeout = qos.DefaultIdleTimeout + } + + result.Tags = tags(c) + + return result +} + +func tags(c *config.Config) []qos.IOTagConfig { + c = c.Sub("tags") + var result []qos.IOTagConfig + for i := 0; ; i++ { + tag := config.String(c, strconv.Itoa(i)+".tag") + if tag == "" { + return result + } + + var tagConfig qos.IOTagConfig + tagConfig.Tag = tag + + v := c.Value(strconv.Itoa(i) + ".weight") + if v != nil { + w, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.Weight = &w + } + + v = c.Value(strconv.Itoa(i) + ".limit_ops") + if v != nil { + l, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.LimitOps = &l + } + + v = c.Value(strconv.Itoa(i) + ".reserved_ops") + if v != nil { + r, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.ReservedOps = &r + } + + v = c.Value(strconv.Itoa(i) + ".prohibited") + if v != nil { + r, err := cast.ToBoolE(v) + panicOnErr(err) + tagConfig.Prohibited = r + } + + result = append(result, tagConfig) + } +} + +func panicOnErr(err error) { + if err != nil { + panic(err) + } +} diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go index 28671ca55..5d4e8f408 100644 --- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go +++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go @@ -52,10 +52,7 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d <= 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. 
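Taken together, the reworked per-shard compression section and the new limits parser above accept a shard config shaped like the sketch below. Values echo the engine config test earlier in this patch; the "fastest" level string is an assumption about compression.LevelFastest's textual form, and weight, limit_ops, reserved_ops and prohibited are all optional per tag:

    compression:
      enabled: true
      level: fastest
      exclude_content_types:
        - audio/*
        - video/*
      estimate_compressibility: true
      estimate_compressibility_threshold: 0.7
    limits:
      read:
        max_running_ops: 10000
        max_waiting_ops: 1000
        idle_timeout: 30s
        tags:
          - tag: internal
            weight: 20
            reserved_ops: 1000
            limit_ops: 0
          - tag: policer
            weight: 5
            limit_ops: 25000
            prohibited: true
      write:
        max_running_ops: 1000
        max_waiting_ops: 100
        idle_timeout: 45s
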
@@ -63,8 +60,5 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s <= 0 { - s = 0 - } - return s + return max(s, 0) } diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go index 5e31e04ad..6fff0308b 100644 --- a/cmd/frostfs-node/config/engine/shard/writecache/config.go +++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go @@ -2,7 +2,6 @@ package writecacheconfig import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb" ) // Config is a wrapper over the config section @@ -10,9 +9,6 @@ import ( type Config config.Config const ( - // SmallSizeDefault is a default size of small objects. - SmallSizeDefault = 32 << 10 - // MaxSizeDefault is a default value of the object payload size limit. MaxSizeDefault = 64 << 20 @@ -21,6 +17,11 @@ const ( // SizeLimitDefault is a default write-cache size limit. SizeLimitDefault = 1 << 30 + + // CountLimitDefault is a default write-cache count limit. + CountLimitDefault = 0 + + MaxFlushingObjectsSizeDefault = 128 << 20 ) // From wraps config section into Config. @@ -51,22 +52,6 @@ func (x *Config) Path() string { return p } -// SmallObjectSize returns the value of "small_object_size" config parameter. -// -// Returns SmallSizeDefault if the value is not a positive number. -func (x *Config) SmallObjectSize() uint64 { - s := config.SizeInBytesSafe( - (*config.Config)(x), - "small_object_size", - ) - - if s > 0 { - return s - } - - return SmallSizeDefault -} - // MaxObjectSize returns the value of "max_object_size" config parameter. // // Returns MaxSizeDefault if the value is not a positive number. @@ -115,6 +100,22 @@ func (x *Config) SizeLimit() uint64 { return SizeLimitDefault } +// CountLimit returns the value of "max_object_count" config parameter. +// +// Returns CountLimitDefault if the value is not a positive number. +func (x *Config) CountLimit() uint64 { + c := config.SizeInBytesSafe( + (*config.Config)(x), + "max_object_count", + ) + + if c > 0 { + return c + } + + return CountLimitDefault +} + // NoSync returns the value of "no_sync" config parameter. // // Returns false if the value is not a boolean. @@ -122,7 +123,18 @@ func (x *Config) NoSync() bool { return config.BoolSafe((*config.Config)(x), "no_sync") } -// BoltDB returns config instance for querying bolt db specific parameters. -func (x *Config) BoltDB() *boltdbconfig.Config { - return (*boltdbconfig.Config)(x) +// MaxFlushingObjectsSize returns the value of "max_flushing_objects_size" config parameter. +// +// Returns MaxFlushingObjectsSizeDefault if the value is not a positive number. 
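+//
+// Example write-cache sketch (values echo the engine config test for shard 0):
+//
+//	writecache:
+//	  max_object_count: 49
+//	  max_flushing_objects_size: 100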
+func (x *Config) MaxFlushingObjectsSize() uint64 { + s := config.SizeInBytesSafe( + (*config.Config)(x), + "max_flushing_objects_size", + ) + + if s > 0 { + return s + } + + return MaxFlushingObjectsSizeDefault } diff --git a/cmd/frostfs-node/config/engine/testdata/shards.env b/cmd/frostfs-node/config/engine/testdata/shards.env new file mode 100644 index 000000000..079789b0f --- /dev/null +++ b/cmd/frostfs-node/config/engine/testdata/shards.env @@ -0,0 +1,3 @@ +FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc +FROSTFS_STORAGE_SHARD_1_MODE=disabled +FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz diff --git a/cmd/frostfs-node/config/engine/testdata/shards.json b/cmd/frostfs-node/config/engine/testdata/shards.json new file mode 100644 index 000000000..b3d6abe85 --- /dev/null +++ b/cmd/frostfs-node/config/engine/testdata/shards.json @@ -0,0 +1,13 @@ +{ + "storage.shard": { + "0": { + "metabase.path": "abc" + }, + "1": { + "mode": "disabled" + }, + "2": { + "metabase.path": "xyz" + } + } +} diff --git a/cmd/frostfs-node/config/engine/testdata/shards.yaml b/cmd/frostfs-node/config/engine/testdata/shards.yaml new file mode 100644 index 000000000..bbbba3af8 --- /dev/null +++ b/cmd/frostfs-node/config/engine/testdata/shards.yaml @@ -0,0 +1,7 @@ +storage.shard: + 0: + metabase.path: abc + 1: + mode: disabled + 2: + metabase.path: xyz diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go index 378b9d793..20f373184 100644 --- a/cmd/frostfs-node/config/logger/config.go +++ b/cmd/frostfs-node/config/logger/config.go @@ -2,6 +2,7 @@ package loggerconfig import ( "os" + "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -52,6 +53,29 @@ func Destination(c *config.Config) string { return DestinationDefault } +// Timestamp returns the value of "timestamp" config parameter +// from "logger" section. +// +// Returns false if the value isn't specified. +func Timestamp(c *config.Config) bool { + return config.BoolSafe(c.Sub(subsection), "timestamp") +} + +// Tags returns the value of "tags" config parameter from "logger" section. +func Tags(c *config.Config) [][]string { + var res [][]string + sub := c.Sub(subsection).Sub("tags") + for i := 0; ; i++ { + s := sub.Sub(strconv.FormatInt(int64(i), 10)) + names := config.StringSafe(s, "names") + if names == "" { + break + } + res = append(res, []string{names, config.StringSafe(s, "level")}) + } + return res +} + // ToLokiConfig extracts loki config. 
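Before ToLokiConfig below, here is a sketch of the logger section that the new Timestamp and Tags helpers above read. Level, destination, timestamp and the first tags entry mirror the logger config test later in this patch; the list form of tags assumes the usual index mapping of this config layer:

    logger:
      level: debug
      destination: journald
      timestamp: true
      tags:
        - names: "main, morph"
          level: debug
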
func ToLokiConfig(c *config.Config) loki.Config { hostname, _ := os.Hostname() diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go index 3587a0ddb..796ad529e 100644 --- a/cmd/frostfs-node/config/logger/config_test.go +++ b/cmd/frostfs-node/config/logger/config_test.go @@ -13,6 +13,7 @@ func TestLoggerSection_Level(t *testing.T) { t.Run("defaults", func(t *testing.T) { require.Equal(t, loggerconfig.LevelDefault, loggerconfig.Level(configtest.EmptyConfig())) require.Equal(t, loggerconfig.DestinationDefault, loggerconfig.Destination(configtest.EmptyConfig())) + require.Equal(t, false, loggerconfig.Timestamp(configtest.EmptyConfig())) }) const path = "../../../../config/example/node" @@ -20,6 +21,10 @@ func TestLoggerSection_Level(t *testing.T) { fileConfigTest := func(c *config.Config) { require.Equal(t, "debug", loggerconfig.Level(c)) require.Equal(t, "journald", loggerconfig.Destination(c)) + require.Equal(t, true, loggerconfig.Timestamp(c)) + tags := loggerconfig.Tags(c) + require.Equal(t, "main, morph", tags[0][0]) + require.Equal(t, "debug", tags[0][1]) } configtest.ForEachFileType(path, fileConfigTest) diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go index 1c536a0e2..a9f774d18 100644 --- a/cmd/frostfs-node/config/morph/config.go +++ b/cmd/frostfs-node/config/morph/config.go @@ -30,6 +30,12 @@ const ( // FrostfsIDCacheSizeDefault is a default value of APE chain cache. FrostfsIDCacheSizeDefault = 10_000 + + // ContainerCacheSizeDefault represents the default size for the container cache. + ContainerCacheSizeDefault = 100 + + // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates. + PollCandidatesTimeoutDefault = 20 * time.Second ) var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section") @@ -103,6 +109,18 @@ func CacheTTL(c *config.Config) time.Duration { return CacheTTLDefault } +// ContainerCacheSize returns the value of "container_cache_size" config parameter +// from "morph" section. +// +// Returns 0 if the value is not a positive integer. +// Returns ContainerCacheSizeDefault if the value is missing. +func ContainerCacheSize(c *config.Config) uint32 { + if c.Sub(subsection).Value("container_cache_size") == nil { + return ContainerCacheSizeDefault + } + return config.Uint32Safe(c.Sub(subsection), "container_cache_size") +} + // SwitchInterval returns the value of "switch_interval" config parameter // from "morph" section. // @@ -139,3 +157,17 @@ func FrostfsIDCacheSize(c *config.Config) uint32 { } return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size") } + +// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter +// from "morph" section. +// +// Returns PollCandidatesTimeoutDefault if the value is not a positive duration. +func NetmapCandidatesPollInterval(c *config.Config) time.Duration { + v := config.DurationSafe(c.Sub(subsection).
+ Sub("netmap").Sub("candidates"), "poll_interval") + if v > 0 { + return v + } + + return PollCandidatesTimeoutDefault +} diff --git a/cmd/frostfs-node/config/multinet/config.go b/cmd/frostfs-node/config/multinet/config.go new file mode 100644 index 000000000..f598efc51 --- /dev/null +++ b/cmd/frostfs-node/config/multinet/config.go @@ -0,0 +1,62 @@ +package multinet + +import ( + "strconv" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" +) + +const ( + subsection = "multinet" + + FallbackDelayDefault = 300 * time.Millisecond +) + +// Enabled returns the value of "enabled" config parameter from "multinet" section. +func Enabled(c *config.Config) bool { + return config.BoolSafe(c.Sub(subsection), "enabled") +} + +type Subnet struct { + Mask string + SourceIPs []string +} + +// Subnets returns the value of "subnets" config parameter from "multinet" section. +func Subnets(c *config.Config) []Subnet { + var result []Subnet + sub := c.Sub(subsection).Sub("subnets") + for i := 0; ; i++ { + s := sub.Sub(strconv.FormatInt(int64(i), 10)) + mask := config.StringSafe(s, "mask") + if mask == "" { + break + } + sourceIPs := config.StringSliceSafe(s, "source_ips") + result = append(result, Subnet{ + Mask: mask, + SourceIPs: sourceIPs, + }) + } + return result +} + +// Balancer returns the value of "balancer" config parameter from "multinet" section. +func Balancer(c *config.Config) string { + return config.StringSafe(c.Sub(subsection), "balancer") +} + +// Restrict returns the value of "restrict" config parameter from "multinet" section. +func Restrict(c *config.Config) bool { + return config.BoolSafe(c.Sub(subsection), "restrict") +} + +// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section. 
+func FallbackDelay(c *config.Config) time.Duration { + fd := config.DurationSafe(c.Sub(subsection), "fallback_delay") + if fd != 0 { // negative value means no fallback + return fd + } + return FallbackDelayDefault +} diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go new file mode 100644 index 000000000..5f7dc6d53 --- /dev/null +++ b/cmd/frostfs-node/config/multinet/config_test.go @@ -0,0 +1,52 @@ +package multinet + +import ( + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestMultinetSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + empty := configtest.EmptyConfig() + require.Equal(t, false, Enabled(empty)) + require.Equal(t, ([]Subnet)(nil), Subnets(empty)) + require.Equal(t, "", Balancer(empty)) + require.Equal(t, false, Restrict(empty)) + require.Equal(t, FallbackDelayDefault, FallbackDelay(empty)) + }) + + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + require.Equal(t, true, Enabled(c)) + require.Equal(t, []Subnet{ + { + Mask: "192.168.219.174/24", + SourceIPs: []string{ + "192.168.218.185", + "192.168.219.185", + }, + }, + { + Mask: "10.78.70.74/24", + SourceIPs: []string{ + "10.78.70.185", + "10.78.71.185", + }, + }, + }, Subnets(c)) + require.Equal(t, "roundrobin", Balancer(c)) + require.Equal(t, false, Restrict(c)) + require.Equal(t, 350*time.Millisecond, FallbackDelay(c)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 97aca274a..c50718c5f 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -3,7 +3,9 @@ package nodeconfig import ( "fmt" "io/fs" + "iter" "os" + "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -88,12 +90,8 @@ func Wallet(c *config.Config) *keys.PrivateKey { type stringAddressGroup []string -func (x stringAddressGroup) IterateAddresses(f func(string) bool) { - for i := range x { - if f(x[i]) { - break - } - } +func (x stringAddressGroup) Addresses() iter.Seq[string] { + return slices.Values(x) } func (x stringAddressGroup) NumberOfAddresses() int { @@ -121,7 +119,7 @@ func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) { func Attributes(c *config.Config) (attrs []string) { const maxAttributes = 100 - for i := 0; i < maxAttributes; i++ { + for i := range maxAttributes { attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i)) if attr == "" { return @@ -133,14 +131,6 @@ func Attributes(c *config.Config) (attrs []string) { return } -// Relay returns the value of "relay" config parameter -// from "node" section. -// -// Returns false if the value is not set. -func Relay(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "relay") -} - // PersistentSessions returns structure that provides access to "persistent_sessions" // subsection of "node" section. func PersistentSessions(c *config.Config) PersistentSessionsConfig { @@ -198,7 +188,7 @@ func (l PersistentPolicyRulesConfig) Path() string { // // Returns PermDefault if the value is not a positive number. 
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { - p := config.UintSafe((*config.Config)(l.cfg), "perm") + p := config.UintSafe(l.cfg, "perm") if p == 0 { p = PermDefault } @@ -210,10 +200,15 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { // // Returns false if the value is not a boolean. func (l PersistentPolicyRulesConfig) NoSync() bool { - return config.BoolSafe((*config.Config)(l.cfg), "no_sync") + return config.BoolSafe(l.cfg, "no_sync") } // CompatibilityMode returns true if need to run node in compatibility with previous versions mode. func CompatibilityMode(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode") } + +// LocodeDBPath returns path to LOCODE database. +func LocodeDBPath(c *config.Config) string { + return config.String(c.Sub(subsection), "locode_db_path") +} diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go index 7b9adecf4..9af1dc038 100644 --- a/cmd/frostfs-node/config/node/config_test.go +++ b/cmd/frostfs-node/config/node/config_test.go @@ -29,12 +29,10 @@ func TestNodeSection(t *testing.T) { ) attribute := Attributes(empty) - relay := Relay(empty) persisessionsPath := PersistentSessions(empty).Path() persistatePath := PersistentState(empty).Path() require.Empty(t, attribute) - require.Equal(t, false, relay) require.Equal(t, "", persisessionsPath) require.Equal(t, PersistentStatePathDefault, persistatePath) }) @@ -45,7 +43,6 @@ func TestNodeSection(t *testing.T) { key := Key(c) addrs := BootstrapAddresses(c) attributes := Attributes(c) - relay := Relay(c) wKey := Wallet(c) persisessionsPath := PersistentSessions(c).Path() persistatePath := PersistentState(c).Path() @@ -87,8 +84,6 @@ func TestNodeSection(t *testing.T) { return false }) - require.Equal(t, true, relay) - require.Len(t, attributes, 2) require.Equal(t, "Price:11", attributes[0]) require.Equal(t, "UN-LOCODE:RU MSK", attributes[1]) diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go index 876dc3ef1..c8c967d30 100644 --- a/cmd/frostfs-node/config/object/config.go +++ b/cmd/frostfs-node/config/object/config.go @@ -10,14 +10,17 @@ type PutConfig struct { cfg *config.Config } +// GetConfig is a wrapper over "get" config section which provides access +// to object get pipeline configuration of object service. +type GetConfig struct { + cfg *config.Config +} + const ( subsection = "object" putSubsection = "put" - - // PutPoolSizeDefault is a default value of routine pool size to - // process object.Put requests in object service. - PutPoolSizeDefault = 10 + getSubsection = "get" ) // Put returns structure that provides access to "put" subsection of @@ -28,31 +31,20 @@ func Put(c *config.Config) PutConfig { } } -// PoolSizeRemote returns the value of "remote_pool_size" config parameter. -// -// Returns PutPoolSizeDefault if the value is not a positive number. -func (g PutConfig) PoolSizeRemote() int { - v := config.Int(g.cfg, "remote_pool_size") - if v > 0 { - return int(v) - } - - return PutPoolSizeDefault -} - -// PoolSizeLocal returns the value of "local_pool_size" config parameter. -// -// Returns PutPoolSizeDefault if the value is not a positive number. -func (g PutConfig) PoolSizeLocal() int { - v := config.Int(g.cfg, "local_pool_size") - if v > 0 { - return int(v) - } - - return PutPoolSizeDefault -} - // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if it is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool { return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification") } + +// Get returns structure that provides access to "get" subsection of +// "object" section. +func Get(c *config.Config) GetConfig { + return GetConfig{ + c.Sub(subsection).Sub(getSubsection), + } +} + +// Priority returns the value of "priority" config parameter. +func (g GetConfig) Priority() []string { + return config.StringSliceSafe(g.cfg, "priority") +} diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go index e2bb105d9..1c525ef55 100644 --- a/cmd/frostfs-node/config/object/config_test.go +++ b/cmd/frostfs-node/config/object/config_test.go @@ -13,8 +13,6 @@ func TestObjectSection(t *testing.T) { t.Run("defaults", func(t *testing.T) { empty := configtest.EmptyConfig() - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote()) - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal()) require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty)) require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification()) }) @@ -22,8 +20,6 @@ func TestObjectSection(t *testing.T) { const path = "../../../../config/example/node" fileConfigTest := func(c *config.Config) { - require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote()) - require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal()) require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c)) require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification()) } diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go index 191694970..6c3e8adab 100644 --- a/cmd/frostfs-node/config/profiler/config.go +++ b/cmd/frostfs-node/config/profiler/config.go @@ -52,7 +52,7 @@ func Address(c *config.Config) string { return AddressDefault } -// BlockRates returns the value of "block_rate" config parameter +// BlockRate returns the value of "block_rate" config parameter // from "pprof" section. func BlockRate(c *config.Config) int { s := c.Sub(subsection) diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go new file mode 100644 index 000000000..85f8180ed --- /dev/null +++ b/cmd/frostfs-node/config/qos/config.go @@ -0,0 +1,46 @@ +package qos + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +const ( + subsection = "qos" + criticalSubSection = "critical" + internalSubSection = "internal" +) + +// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. +func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys { + return authorizedKeys(c, criticalSubSection) +} + +// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. 
+func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys { + return authorizedKeys(c, internalSubSection) +} + +func authorizedKeys(c *config.Config, sub string) keys.PublicKeys { + strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys") + pubs := make(keys.PublicKeys, 0, len(strKeys)) + + for i := range strKeys { + pub, err := keys.NewPublicKeyFromString(strKeys[i]) + if err != nil { + panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err)) + } + + pubs = append(pubs, pub) + } + + return pubs +} diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go new file mode 100644 index 000000000..b3b6019cc --- /dev/null +++ b/cmd/frostfs-node/config/qos/config_test.go @@ -0,0 +1,40 @@ +package qos + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestQoSSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + empty := configtest.EmptyConfig() + + require.Empty(t, CriticalAuthorizedKeys(empty)) + require.Empty(t, InternalAuthorizedKeys(empty)) + }) + + const path = "../../../../config/example/node" + + criticalPubs := make(keys.PublicKeys, 2) + criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11") + criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6") + + internalPubs := make(keys.PublicKeys, 2) + internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2") + internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a") + + fileConfigTest := func(c *config.Config) { + require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c)) + require.Equal(t, internalPubs, InternalAuthorizedKeys(c)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go index 0fbac935c..e954bf19d 100644 --- a/cmd/frostfs-node/config/replicator/config.go +++ b/cmd/frostfs-node/config/replicator/config.go @@ -11,6 +11,8 @@ const ( // PutTimeoutDefault is a default timeout of object put request in replicator. PutTimeoutDefault = 5 * time.Second + // PoolSizeDefault is a default pool size for put requests in replicator. + PoolSizeDefault = 10 ) // PutTimeout returns the value of "put_timeout" config parameter // from "replicator" section. @@ -28,6 +30,13 @@ func PutTimeout(c *config.Config) time.Duration { // PoolSize returns the value of "pool_size" config parameter // from "replicator" section. +// +// Returns PoolSizeDefault if the value is a non-positive integer.
func PoolSize(c *config.Config) int { - return int(config.IntSafe(c.Sub(subsection), "pool_size")) + v := int(config.IntSafe(c.Sub(subsection), "pool_size")) + if v > 0 { + return v + } + + return PoolSizeDefault } diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go index 2129c01b4..2aa490946 100644 --- a/cmd/frostfs-node/config/replicator/config_test.go +++ b/cmd/frostfs-node/config/replicator/config_test.go @@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) { empty := configtest.EmptyConfig() require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty)) - require.Equal(t, 0, replicatorconfig.PoolSize(empty)) + require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty)) }) const path = "../../../../config/example/node" diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go new file mode 100644 index 000000000..e0efdfde2 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config.go @@ -0,0 +1,42 @@ +package rpcconfig + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" +) + +const ( + subsection = "rpc" + limitsSubsection = "limits" +) + +type LimitConfig struct { + Methods []string + MaxOps int64 +} + +// Limits returns the "limits" config from "rpc" section. +func Limits(c *config.Config) []LimitConfig { + c = c.Sub(subsection).Sub(limitsSubsection) + + var limits []LimitConfig + + for i := uint64(0); ; i++ { + si := strconv.FormatUint(i, 10) + sc := c.Sub(si) + + methods := config.StringSliceSafe(sc, "methods") + if len(methods) == 0 { + break + } + + if sc.Value("max_ops") == nil { + panic("no max operations for method group") + } + + limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) + } + + return limits +} diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go new file mode 100644 index 000000000..a6365e19f --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config_test.go @@ -0,0 +1,77 @@ +package rpcconfig + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestRPCSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + require.Empty(t, Limits(configtest.EmptyConfig())) + }) + + t.Run("correct config", func(t *testing.T) { + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(1000)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) + + t.Run("no max operations", func(t *testing.T) { + const path = "testdata/no_max_ops" + + fileConfigTest := func(c *config.Config) { + require.Panics(t, func() { _ = Limits(c) }) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) + + 
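The pair of subtests around this point pins down an asymmetry in Limits worth spelling out: an entry whose "max_ops" key is absent makes parsing panic, while an explicit "max_ops: 0" parses as a legal limit of zero. A minimal sketch of that distinction, assuming only the config.Sub/Value/IntSafe accessors already used by Limits above (the limitFor helper itself is hypothetical):

// limitFor reads "max_ops" for one indexed "rpc.limits" entry and
// distinguishes an absent key (ok == false) from an explicit zero.
func limitFor(sc *config.Config) (maxOps int64, ok bool) {
	if sc.Value("max_ops") == nil {
		return 0, false // absent: Limits treats this as a fatal misconfiguration
	}
	return config.IntSafe(sc, "max_ops"), true // an explicit 0 is a valid limit
}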
t.Run("zero max operations", func(t *testing.T) { + const path = "testdata/zero_max_ops" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(0)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) +} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env new file mode 100644 index 000000000..2fed4c5bc --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env @@ -0,0 +1,3 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json new file mode 100644 index 000000000..6156aa71d --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json @@ -0,0 +1,18 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ] + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml new file mode 100644 index 000000000..e50b7ae93 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml @@ -0,0 +1,8 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env new file mode 100644 index 000000000..ce7302b0b --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env @@ -0,0 +1,4 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=0 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json new file mode 100644 index 000000000..16a1c173f --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json @@ -0,0 +1,19 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 0 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml new file mode 100644 index 000000000..525d768d4 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml @@ -0,0 +1,9 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 0 + - 
methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go index 28ec65291..e98c032f0 100644 --- a/cmd/frostfs-node/config/test/config.go +++ b/cmd/frostfs-node/config/test/config.go @@ -11,8 +11,6 @@ import ( ) func fromFile(path string) *config.Config { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - return config.New(path, "", "") } @@ -64,7 +62,6 @@ func loadEnv(t testing.TB, path string) { v = strings.Trim(v, `"`) - err = os.Setenv(k, v) - require.NoError(t, err, "can't set environment variable") + t.Setenv(k, v) } } diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go index e846be158..91ef669ee 100644 --- a/cmd/frostfs-node/config/tracing/config.go +++ b/cmd/frostfs-node/config/tracing/config.go @@ -1,6 +1,12 @@ package tracing import ( + "crypto/x509" + "errors" + "fmt" + "os" + "strconv" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -11,19 +17,53 @@ const ( ) // ToTracingConfig extracts tracing config. -func ToTracingConfig(c *config.Config) *tracing.Config { - return &tracing.Config{ +func ToTracingConfig(c *config.Config) (*tracing.Config, error) { + conf := &tracing.Config{ Enabled: config.BoolSafe(c.Sub(subsection), "enabled"), Exporter: tracing.Exporter(config.StringSafe(c.Sub(subsection), "exporter")), Endpoint: config.StringSafe(c.Sub(subsection), "endpoint"), Service: "frostfs-node", InstanceID: getInstanceIDOrDefault(c), Version: misc.Version, + Attributes: make(map[string]string), } + + if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" { + caBytes, err := os.ReadFile(trustedCa) + if err != nil { + return nil, fmt.Errorf("cannot read trusted ca cert by path: %w", err) + } + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(caBytes) + if !ok { + return nil, errors.New("can't fill cert pool by ca cert") + } + conf.ServerCaCertPool = certPool + } + + i := uint64(0) + for ; ; i++ { + si := strconv.FormatUint(i, 10) + ac := c.Sub(subsection).Sub("attributes").Sub(si) + k := config.StringSafe(ac, "key") + if k == "" { + break + } + v := config.StringSafe(ac, "value") + if v == "" { + return nil, fmt.Errorf("empty tracing attribute value for key %s", k) + } + if _, ok := conf.Attributes[k]; ok { + return nil, fmt.Errorf("tracing attribute key %s defined more than once", k) + } + conf.Attributes[k] = v + } + + return conf, nil } func getInstanceIDOrDefault(c *config.Config) string { - s := config.StringSlice(c.Sub("node"), "addresses") + s := config.StringSliceSafe(c.Sub("node"), "addresses") if len(s) > 0 { return s[0] } diff --git a/cmd/frostfs-node/config/tracing/config_test.go b/cmd/frostfs-node/config/tracing/config_test.go new file mode 100644 index 000000000..8e485ca6e --- /dev/null +++ b/cmd/frostfs-node/config/tracing/config_test.go @@ -0,0 +1,46 @@ +package tracing + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "github.com/stretchr/testify/require" +) + +func TestTracingSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + tc, err := ToTracingConfig(configtest.EmptyConfig()) + require.NoError(t, 
err) + require.Equal(t, false, tc.Enabled) + require.Equal(t, tracing.Exporter(""), tc.Exporter) + require.Equal(t, "", tc.Endpoint) + require.Equal(t, "frostfs-node", tc.Service) + require.Equal(t, "", tc.InstanceID) + require.Nil(t, tc.ServerCaCertPool) + require.Empty(t, tc.Attributes) + }) + + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + tc, err := ToTracingConfig(c) + require.NoError(t, err) + require.Equal(t, true, tc.Enabled) + require.Equal(t, tracing.OTLPgRPCExporter, tc.Exporter) + require.Equal(t, "localhost", tc.Endpoint) + require.Equal(t, "frostfs-node", tc.Service) + require.Nil(t, tc.ServerCaCertPool) + require.EqualValues(t, map[string]string{ + "key0": "value", + "key1": "value", + }, tc.Attributes) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/cmd/frostfs-node/config/tree/config.go b/cmd/frostfs-node/config/tree/config.go index 8a8919999..da877791e 100644 --- a/cmd/frostfs-node/config/tree/config.go +++ b/cmd/frostfs-node/config/tree/config.go @@ -10,6 +10,8 @@ import ( const ( subsection = "tree" + + SyncBatchSizeDefault = 1000 ) // TreeConfig is a wrapper over "tree" config section @@ -74,6 +76,17 @@ func (c TreeConfig) SyncInterval() time.Duration { return config.DurationSafe(c.cfg, "sync_interval") } +// SyncBatchSize returns the value of "sync_batch_size" +// config parameter from the "tree" section. +// +// Returns `SyncBatchSizeDefault` if config value is not specified. +func (c TreeConfig) SyncBatchSize() int { + if v := config.IntSafe(c.cfg, "sync_batch_size"); v > 0 { + return int(v) + } + return SyncBatchSizeDefault +} + // AuthorizedKeys parses and returns an array of "authorized_keys" config // parameter from "tree" section. 
// diff --git a/cmd/frostfs-node/config/tree/config_test.go b/cmd/frostfs-node/config/tree/config_test.go index 285ea0725..6628b8878 100644 --- a/cmd/frostfs-node/config/tree/config_test.go +++ b/cmd/frostfs-node/config/tree/config_test.go @@ -44,6 +44,7 @@ func TestTreeSection(t *testing.T) { require.Equal(t, 32, treeSec.ReplicationWorkerCount()) require.Equal(t, 5*time.Second, treeSec.ReplicationTimeout()) require.Equal(t, time.Hour, treeSec.SyncInterval()) + require.Equal(t, 2000, treeSec.SyncBatchSize()) require.Equal(t, expectedKeys, treeSec.AuthorizedKeys()) } diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 5a29aac76..bdb280d87 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -5,9 +5,10 @@ import ( "context" "net" - containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" + containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" @@ -17,6 +18,7 @@ import ( containerTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/container/grpc" containerService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" containerMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph" + containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" @@ -27,10 +29,10 @@ import ( func initContainerService(_ context.Context, c *cfg) { // container wrapper that tries to invoke notary // requests if chain is configured so - wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary()) + wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) fatalOnErr(err) - c.shared.cnrClient = wrap + c.cnrClient = wrap cnrSrc := cntClient.AsContainerSource(wrap) @@ -41,11 +43,12 @@ func initContainerService(_ context.Context, c *cfg) { fatalOnErr(err) cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg) - if cacheSize > 0 { - frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL) + if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { + frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } - c.shared.frostfsidClient = frostfsIDSubjectProvider + c.frostfsidClient = frostfsIDSubjectProvider + c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), @@ -54,8 +57,10 @@ func initContainerService(_ context.Context, c *cfg) { service := containerService.NewSignService( &c.key.PrivateKey, containerService.NewAPEServer(defaultChainRouter, cnrRdr, - newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, 
c.shared.frostfsidClient, - containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc), + newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient, + containerService.NewSplitterService( + c.cfgContainer.containerBatchSize, c.respSvc, + containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), ), ) service = containerService.NewAuditService(service, c.log, c.audit) @@ -63,16 +68,15 @@ func initContainerService(_ context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { containerGRPC.RegisterContainerServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(containerGRPC.ContainerService_ServiceDesc), server) }) c.cfgObject.cfgLocalStorage.localStorage.SetContainerSource(cnrRdr) } func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc containerCore.Source) (*morphContainerReader, *morphContainerWriter) { - eACLFetcher := &morphEACLFetcher{ - w: client, - } - cnrRdr := new(morphContainerReader) cnrWrt := &morphContainerWriter{ @@ -80,57 +84,51 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c } if c.cfgMorph.cacheTTL <= 0 { - c.cfgObject.eaclSource = eACLFetcher - cnrRdr.eacl = eACLFetcher c.cfgObject.cnrSource = cnrSrc cnrRdr.src = cnrSrc cnrRdr.lister = client } else { // use RPC node as source of Container contract items (with caching) - cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL) - cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL) + c.cfgObject.cnrSource = cnrSrc + if c.cfgMorph.containerCacheSize > 0 { + containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize) - subscribeToContainerCreation(c, func(e event.Event) { - ev := e.(containerEvent.PutSuccess) + subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) { + ev := e.(containerEvent.PutSuccess) - // read owner of the created container in order to update the reading cache. - // TODO: use owner directly from the event after neofs-contract#256 will become resolved - // but don't forget about the profit of reading the new container and caching it: - // creation success are most commonly tracked by polling GET op. - cnr, err := cnrSrc.Get(ev.ID) - if err == nil { - cachedContainerStorage.containerCache.set(ev.ID, cnr, nil) - } else { - // unlike removal, we expect successful receive of the container - // after successful creation, so logging can be useful - c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, + // read owner of the created container in order to update the reading cache. + // TODO: use owner directly from the event after neofs-contract#256 will become resolved + // but don't forget about the profit of reading the new container and caching it: + // creation success are most commonly tracked by polling GET op. 
+ cnr, err := cnrSrc.Get(ctx, ev.ID) + if err == nil { + containerCache.containerCache.set(ev.ID, cnr, nil) + } else { + // unlike removal, we expect successful receive of the container + // after successful creation, so logging can be useful + c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, + zap.Stringer("id", ev.ID), + zap.Error(err), + ) + } + + c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt, zap.Stringer("id", ev.ID), - zap.Error(err), ) - } + }) - c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt, - zap.Stringer("id", ev.ID), - ) - }) - - subscribeToContainerRemoval(c, func(e event.Event) { - ev := e.(containerEvent.DeleteSuccess) - cachedContainerStorage.handleRemoval(ev.ID) - c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt, - zap.Stringer("id", ev.ID), - ) - }) - - c.cfgObject.eaclSource = cachedEACLStorage - c.cfgObject.cnrSource = cachedContainerStorage + subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) { + ev := e.(containerEvent.DeleteSuccess) + containerCache.handleRemoval(ev.ID) + c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt, + zap.Stringer("id", ev.ID), + ) + }) + c.cfgObject.cnrSource = containerCache + } cnrRdr.lister = client - cnrRdr.eacl = c.cfgObject.eaclSource cnrRdr.src = c.cfgObject.cnrSource - - cnrWrt.cacheEnabled = true - cnrWrt.eacls = cachedEACLStorage } return cnrRdr, cnrWrt @@ -220,42 +218,38 @@ func (c *cfg) ExternalAddresses() []string { // implements interface required by container service provided by morph executor. type morphContainerReader struct { - eacl containerCore.EACLSource - src containerCore.Source lister interface { - ContainersOf(*user.ID) ([]cid.ID, error) + ContainersOf(context.Context, *user.ID) ([]cid.ID, error) + IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error } } -func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) { - return x.src.Get(id) +func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) { + return x.src.Get(ctx, id) } -func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) { - return x.src.DeletionInfo(id) +func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) { + return x.src.DeletionInfo(ctx, id) } -func (x *morphContainerReader) GetEACL(id cid.ID) (*containerCore.EACL, error) { - return x.eacl.GetEACL(id) +func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) { + return x.lister.ContainersOf(ctx, id) } -func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) { - return x.lister.ContainersOf(id) +func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error { + return x.lister.IterateContainersOf(ctx, id, processCID) } type morphContainerWriter struct { neoClient *cntClient.Client - - cacheEnabled bool - eacls ttlEACLStorage } -func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) { - return cntClient.Put(m.neoClient, cnr) +func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) { + return cntClient.Put(ctx, m.neoClient, cnr) } -func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error { - return cntClient.Delete(m.neoClient, witness) +func (m morphContainerWriter) Delete(ctx context.Context, witness 
containerCore.RemovalWitness) error { + return cntClient.Delete(ctx, m.neoClient, witness) } diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index e1e6e3ac9..1825013c7 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -7,16 +7,19 @@ import ( controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" + metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" + tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "go.uber.org/zap" "google.golang.org/grpc" ) const serviceNameControl = "control" -func initControlService(c *cfg) { +func initControlService(ctx context.Context, c *cfg) { endpoint := controlconfig.GRPC(c.appCfg).Endpoint() if endpoint == controlconfig.GRPCEndpointDefault { return @@ -46,21 +49,28 @@ func initControlService(c *cfg) { lis, err := net.Listen("tcp", endpoint) if err != nil { - c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) return } - c.cfgControlService.server = grpc.NewServer() + c.cfgControlService.server = grpc.NewServer( + grpc.ChainUnaryInterceptor( + qos.NewSetCriticalIOTagUnaryServerInterceptor(), + metrics.NewUnaryServerInterceptor(), + tracing.NewUnaryServerInterceptor(), + ), + // control service has no stream methods, so no stream interceptors added + ) c.onShutdown(func() { - stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log) + stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) }) control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc) c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) { - c.log.Info(logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", serviceNameControl), zap.String("endpoint", endpoint)) fatalOnErr(c.cfgControlService.server.Serve(lis)) @@ -72,23 +82,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus { return c.cfgNetmap.state.controlNetmapStatus() } -func (c *cfg) setHealthStatus(st control.HealthStatus) { - c.notifySystemd(st) +func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) { + c.notifySystemd(ctx, st) c.healthStatus.Store(int32(st)) c.metricsCollector.State().SetHealth(int32(st)) } -func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) { +func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) { if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped { - c.notifySystemd(newSt) + c.notifySystemd(ctx, newSt) c.metricsCollector.State().SetHealth(int32(newSt)) } return } -func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) { +func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) { old = control.HealthStatus(c.healthStatus.Swap(int32(st))) - c.notifySystemd(st) + c.notifySystemd(ctx, st) c.metricsCollector.State().SetHealth(int32(st)) return } 
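The health-status helpers above rely on an atomic compare-and-swap so that the STARTING to READY transition performed at boot cannot overwrite a status that has already moved on, for example to a shutdown state. A self-contained sketch of the same race-safe pattern, with simplified int32 constants standing in for control.HealthStatus values:

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	statusStarting int32 = iota
	statusReady
	statusShuttingDown
)

func main() {
	var health atomic.Int32 // zero value is statusStarting

	// Simulate a shutdown that begins before readiness is reported.
	health.Store(statusShuttingDown)

	// The STARTING -> READY flip happens only if nothing won the race first.
	if health.CompareAndSwap(statusStarting, statusReady) {
		fmt.Println("reported READY")
	} else {
		fmt.Println("kept status:", health.Load())
	}
}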
@@ -97,7 +107,7 @@ func (c *cfg) HealthStatus() control.HealthStatus { return control.HealthStatus(c.healthStatus.Load()) } -func (c *cfg) notifySystemd(st control.HealthStatus) { +func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) { if !c.sdNotify { return } @@ -113,6 +123,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) { err = sdnotify.Status(fmt.Sprintf("%v", st)) } if err != nil { - c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err)) + c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go index fb55a6019..d2d4e9785 100644 --- a/cmd/frostfs-node/frostfsid.go +++ b/cmd/frostfs-node/frostfsid.go @@ -1,6 +1,8 @@ package main import ( + "context" + "strings" "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" @@ -9,57 +11,101 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" ) +type subjectWithError struct { + subject *client.Subject + err error +} + +type subjectExtWithError struct { + subject *client.SubjectExtended + err error +} + type morphFrostfsIDCache struct { subjProvider frostfsidcore.SubjectProvider - subjCache *expirable.LRU[util.Uint160, *client.Subject] + subjCache *expirable.LRU[util.Uint160, subjectWithError] - subjExtCache *expirable.LRU[util.Uint160, *client.SubjectExtended] + subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError] + + metrics cacheMetrics } -func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration) frostfsidcore.SubjectProvider { +func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider { return &morphFrostfsIDCache{ subjProvider: subjProvider, - subjCache: expirable.NewLRU(size, func(util.Uint160, *client.Subject) {}, ttl), + subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, ttl), - subjExtCache: expirable.NewLRU(size, func(util.Uint160, *client.SubjectExtended) {}, ttl), + subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl), + + metrics: metrics, } } -func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) { +func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { + hit := false + startedAt := time.Now() + defer func() { + m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit) + }() + result, found := m.subjCache.Get(addr) if found { - return result, nil + hit = true + return result.subject, result.err } - result, err := m.subjProvider.GetSubject(addr) + subj, err := m.subjProvider.GetSubject(ctx, addr) if err != nil { + if m.isCacheableError(err) { + m.subjCache.Add(addr, subjectWithError{ + err: err, + }) + } return nil, err } - m.subjCache.Add(addr, result) - return result, nil + m.subjCache.Add(addr, subjectWithError{subject: subj}) + return subj, nil } -func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { - subjExt, found := m.subjExtCache.Get(addr) +func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { + hit := false + startedAt := time.Now() + defer func() { + m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit) + }() + + result, found := m.subjExtCache.Get(addr) if found { - return subjExt, nil + hit = true + return result.subject, result.err } - var 
err error - subjExt, err = m.subjProvider.GetSubjectExtended(addr) + subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr) if err != nil { + if m.isCacheableError(err) { + m.subjExtCache.Add(addr, subjectExtWithError{ + err: err, + }) + m.subjCache.Add(addr, subjectWithError{ + err: err, + }) + } return nil, err } - m.subjExtCache.Add(addr, subjExt) - m.subjCache.Add(addr, subjectFromSubjectExtended(subjExt)) + m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt}) + m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)}) return subjExt, nil } +func (m *morphFrostfsIDCache) isCacheableError(err error) bool { + return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) +} + func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject { return &client.Subject{ PrimaryKey: subjExt.PrimaryKey, diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 3a38b2cca..6b6d44750 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -1,16 +1,22 @@ package main import ( + "context" "crypto/tls" "errors" + "fmt" "net" "time" grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" + rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -18,11 +24,11 @@ import ( const maxRecvMsgSize = 256 << 20 -func initGRPC(c *cfg) { +func initGRPC(ctx context.Context, c *cfg) { var endpointsToReconnect []string var successCount int grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { - serverOpts, ok := getGrpcServerOpts(c, sc) + serverOpts, ok := getGrpcServerOpts(ctx, c, sc) if !ok { return } @@ -30,7 +36,7 @@ func initGRPC(c *cfg) { lis, err := net.Listen("tcp", sc.Endpoint()) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint()) - c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint()) return } @@ -39,7 +45,7 @@ func initGRPC(c *cfg) { srv := grpc.NewServer(serverOpts...) 
c.onShutdown(func() { - stopGRPC("FrostFS Public API", srv, c.log) + stopGRPC(ctx, "FrostFS Public API", srv, c.log) }) c.cfgGRPC.append(sc.Endpoint(), lis, srv) @@ -52,11 +58,11 @@ func initGRPC(c *cfg) { c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg) for _, endpoint := range endpointsToReconnect { - scheduleReconnect(endpoint, c) + scheduleReconnect(ctx, endpoint, c) } } -func scheduleReconnect(endpoint string, c *cfg) { +func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) { c.wg.Add(1) go func() { defer c.wg.Done() @@ -65,7 +71,7 @@ func scheduleReconnect(endpoint string, c *cfg) { for { select { case <-t.C: - if tryReconnect(endpoint, c) { + if tryReconnect(ctx, endpoint, c) { return } case <-c.done: @@ -75,20 +81,20 @@ func scheduleReconnect(endpoint string, c *cfg) { }() } -func tryReconnect(endpoint string, c *cfg) bool { - c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) +func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool { + c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) - serverOpts, found := getGRPCEndpointOpts(endpoint, c) + serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c) if !found { - c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) + c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) return true } lis, err := net.Listen("tcp", endpoint) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint) - c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) - c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) return false } c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint) @@ -96,16 +102,16 @@ func tryReconnect(endpoint string, c *cfg) bool { srv := grpc.NewServer(serverOpts...) 
c.onShutdown(func() { - stopGRPC("FrostFS Public API", srv, c.log) + stopGRPC(ctx, "FrostFS Public API", srv, c.log) }) c.cfgGRPC.appendAndHandle(endpoint, lis, srv) - c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) + c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) return true } -func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) { +func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) { unlock := c.LockAppConfigShared() defer unlock() grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { @@ -116,7 +122,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f return } var ok bool - result, ok = getGrpcServerOpts(c, sc) + result, ok = getGrpcServerOpts(ctx, c, sc) if !ok { return } @@ -125,16 +131,20 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f return } -func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) { +func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) { serverOpts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.ChainUnaryInterceptor( + qos.NewUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), grpc.ChainStreamInterceptor( + qos.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), } @@ -143,7 +153,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool if tlsCfg != nil { cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile()) if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) return nil, false } @@ -174,38 +184,38 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool return serverOpts, true } -func serveGRPC(c *cfg) { +func serveGRPC(ctx context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) { c.wg.Add(1) go func() { defer func() { - c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint, zap.Stringer("endpoint", l.Addr()), ) c.wg.Done() }() - c.log.Info(logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", "gRPC"), zap.Stringer("endpoint", l.Addr()), ) if err := s.Serve(l); err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e) - c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err)) c.cfgGRPC.dropConnection(e) - scheduleReconnect(e, c) + scheduleReconnect(ctx, e, c) } }() }) } -func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { - l = &logger.Logger{Logger: l.With(zap.String("name", name))} +func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) { + l = l.With(zap.String("name", name)) - l.Info(logs.FrostFSNodeStoppingGRPCServer) + l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer) // 
GracefulStop() may freeze forever, see #1270 done := make(chan struct{}) @@ -217,9 +227,60 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { select { case <-done: case <-time.After(1 * time.Minute): - l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) + l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) s.Stop() } - l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully) + l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) +} + +func initRPCLimiter(c *cfg) error { + var limits []limiting.KeyLimit + for _, l := range rpcconfig.Limits(c.appCfg) { + limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) + } + + if err := validateRPCLimits(c, limits); err != nil { + return fmt.Errorf("validate RPC limits: %w", err) + } + + limiter, err := limiting.NewSemaphoreLimiter(limits) + if err != nil { + return fmt.Errorf("create RPC limiter: %w", err) + } + + c.cfgGRPC.limiter.Store(limiter) + return nil +} + +func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error { + availableMethods := getAvailableMethods(c.cfgGRPC.servers) + for _, limit := range limits { + for _, method := range limit.Keys { + if _, ok := availableMethods[method]; !ok { + return fmt.Errorf("set limit on an unknown method %q", method) + } + } + } + return nil +} + +func getAvailableMethods(servers []grpcServer) map[string]struct{} { + res := make(map[string]struct{}) + for _, server := range servers { + for _, method := range getMethodsForServer(server.Server) { + res[method] = struct{}{} + } + } + return res +} + +func getMethodsForServer(server *grpc.Server) []string { + var res []string + for service, info := range server.GetServiceInfo() { + for _, method := range info.Methods { + res = append(res, fmt.Sprintf("/%s/%s", service, method.Name)) + } + } + return res } diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go index 2ec20d848..7346206ef 100644 --- a/cmd/frostfs-node/httpcomponent.go +++ b/cmd/frostfs-node/httpcomponent.go @@ -20,9 +20,9 @@ type httpComponent struct { preReload func(c *cfg) } -func (cmp *httpComponent) init(c *cfg) { +func (cmp *httpComponent) init(ctx context.Context, c *cfg) { if !cmp.enabled { - c.log.Info(cmp.name + " is disabled") + c.log.Info(ctx, cmp.name+" is disabled") return } // Init server with parameters @@ -39,14 +39,14 @@ func (cmp *httpComponent) init(c *cfg) { go func() { defer c.wg.Done() - c.log.Info(logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", cmp.name), zap.String("endpoint", cmp.address)) fatalOnErr(srv.Serve()) }() c.closers = append(c.closers, closer{ cmp.name, - func() { stopAndLog(c, cmp.name, srv.Shutdown) }, + func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) }, }) } @@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error { // Cleanup delCloser(cmp.cfg, cmp.name) // Init server with new parameters - cmp.init(cmp.cfg) + cmp.init(ctx, cmp.cfg) // Start worker if cmp.enabled { startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name)) diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index e4f0a434c..0228d2a10 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -61,21 +61,21 @@ func main() { var ctx context.Context ctx, c.ctxCancel = context.WithCancel(context.Background()) - c.setHealthStatus(control.HealthStatus_STARTING) + c.setHealthStatus(ctx, control.HealthStatus_STARTING) initApp(ctx, c) bootUp(ctx, c) - 
c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY) + c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY) wait(c) } -func initAndLog(c *cfg, name string, initializer func(*cfg)) { - c.log.Info(fmt.Sprintf("initializing %s service...", name)) +func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) { + c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name)) initializer(c) - c.log.Info(name + " service has been successfully initialized") + c.log.Info(ctx, name+" service has been successfully initialized") } func initApp(ctx context.Context, c *cfg) { @@ -85,72 +85,75 @@ func initApp(ctx context.Context, c *cfg) { c.wg.Done() }() - setRuntimeParameters(c) + setRuntimeParameters(ctx, c) metrics, _ := metricsComponent(c) - initAndLog(c, "profiler", initProfilerService) - initAndLog(c, metrics.name, metrics.init) + initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) }) + initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) }) - initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) }) + initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) }) initLocalStorage(ctx, c) - initAndLog(c, "storage engine", func(c *cfg) { + initAndLog(ctx, c, "storage engine", func(c *cfg) { fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx)) fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx)) }) - initAndLog(c, "gRPC", initGRPC) - initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) + initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) + initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) + initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) }) initAccessPolicyEngine(ctx, c) - initAndLog(c, "access policy engine", func(c *cfg) { + initAndLog(ctx, c, "access policy engine", func(c *cfg) { fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx)) fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init()) }) - initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) - initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) }) - initAndLog(c, "session", initSessionService) - initAndLog(c, "object", initObjectService) - initAndLog(c, "tree", initTreeService) - initAndLog(c, "apemanager", initAPEManagerService) - initAndLog(c, "control", initControlService) + initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) + initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) }) + initAndLog(ctx, c, "session", initSessionService) + initAndLog(ctx, c, "object", initObjectService) + initAndLog(ctx, c, "tree", initTreeService) + initAndLog(ctx, c, "apemanager", initAPEManagerService) + initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) - initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) + initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) }) + + initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) } func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) { - c.log.Info(fmt.Sprintf("starting %s service...", name)) + c.log.Info(ctx, fmt.Sprintf("starting %s service...", name)) starter(ctx, c) if logSuccess { - c.log.Info(name + " service started 
successfully") + c.log.Info(ctx, name+" service started successfully") } } -func stopAndLog(c *cfg, name string, stopper func() error) { - c.log.Debug(fmt.Sprintf("shutting down %s service", name)) +func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) { + c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name)) - err := stopper() + err := stopper(ctx) if err != nil { - c.log.Debug(fmt.Sprintf("could not shutdown %s server", name), - zap.String("error", err.Error()), + c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name), + zap.Error(err), ) } - c.log.Debug(name + " service has been stopped") + c.log.Debug(ctx, name+" service has been stopped") } func bootUp(ctx context.Context, c *cfg) { - runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) }) + runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) }) runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit) - bootstrapNode(c) + bootstrapNode(ctx, c) startWorkers(ctx, c) } func wait(c *cfg) { - c.log.Info(logs.CommonApplicationStarted, + c.log.Info(context.Background(), logs.CommonApplicationStarted, zap.String("version", misc.Version)) <-c.done // graceful shutdown @@ -160,12 +163,12 @@ func wait(c *cfg) { go func() { defer drain.Done() for err := range c.internalErr { - c.log.Warn(logs.FrostFSNodeInternalApplicationError, + c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) } }() - c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop) + c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop) c.wg.Wait() diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go index 19b4af51f..d9ca01e70 100644 --- a/cmd/frostfs-node/metrics.go +++ b/cmd/frostfs-node/metrics.go @@ -8,38 +8,38 @@ import ( func metricsComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.metrics == nil { - c.dynamicConfiguration.metrics = new(httpComponent) - c.dynamicConfiguration.metrics.cfg = c - c.dynamicConfiguration.metrics.name = "metrics" - c.dynamicConfiguration.metrics.handler = metrics.Handler() + if c.metrics == nil { + c.metrics = new(httpComponent) + c.metrics.cfg = c + c.metrics.name = "metrics" + c.metrics.handler = metrics.Handler() updated = true } // (re)init read configuration enabled := metricsconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.metrics.enabled { - c.dynamicConfiguration.metrics.enabled = enabled + if enabled != c.metrics.enabled { + c.metrics.enabled = enabled updated = true } address := metricsconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.metrics.address { - c.dynamicConfiguration.metrics.address = address + if address != c.metrics.address { + c.metrics.address = address updated = true } dur := metricsconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.metrics.shutdownDur { - c.dynamicConfiguration.metrics.shutdownDur = dur + if dur != c.metrics.shutdownDur { + c.metrics.shutdownDur = dur updated = true } - return c.dynamicConfiguration.metrics, updated + return c.metrics, updated } func enableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Enable() + c.metricsSvc.Enable() } func disableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Disable() + c.metricsSvc.Disable() } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 1b148095b..917cf6fc0 100644 --- a/cmd/frostfs-node/morph.go +++ 
b/cmd/frostfs-node/morph.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -23,12 +24,56 @@ import ( const ( newEpochNotification = "NewEpoch" - - // amount of tries(blocks) before notary deposit timeout. - notaryDepositRetriesAmount = 300 ) -func initMorphComponents(ctx context.Context, c *cfg) { +func (c *cfg) initMorphComponents(ctx context.Context) { + c.cfgMorph.guard.Lock() + defer c.cfgMorph.guard.Unlock() + if c.cfgMorph.initialized { + return + } + initMorphClient(ctx, c) + + lookupScriptHashesInNNS(c) // smart contract auto negotiation + + err := c.cfgMorph.client.EnableNotarySupport( + client.WithProxyContract( + c.cfgMorph.proxyScriptHash, + ), + ) + fatalOnErr(err) + + c.log.Info(ctx, logs.FrostFSNodeNotarySupport) + + wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0) + fatalOnErr(err) + + var netmapSource netmap.Source + + c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg) + c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg) + + if c.cfgMorph.cacheTTL == 0 { + msPerBlock, err := c.cfgMorph.client.MsPerBlock() + fatalOnErr(err) + c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond + c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) + } + + if c.cfgMorph.cacheTTL < 0 { + netmapSource = newRawNetmapStorage(wrap) + } else { + // use RPC node as source of netmap (with caching) + netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg, + morphconfig.NetmapCandidatesPollInterval(c.appCfg)) + } + + c.netMapSource = netmapSource + c.cfgNetmap.wrapper = wrap + c.cfgMorph.initialized = true +} + +func initMorphClient(ctx context.Context, c *cfg) { addresses := morphconfig.RPCEndpoint(c.appCfg) // Morph client stable-sorts endpoints by priority. 
Shuffle here to randomize @@ -40,7 +85,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { cli, err := client.New(ctx, c.key, client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)), - client.WithLogger(c.log), + client.WithLogger(c.log.WithTag(logger.TagMorph)), client.WithMetrics(c.metricsCollector.MorphClientMetrics()), client.WithEndpoints(addresses...), client.WithConnLostCallback(func() { @@ -48,90 +93,46 @@ func initMorphComponents(ctx context.Context, c *cfg) { }), client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)), client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()), + client.WithDialerSource(c.dialerSource), ) if err != nil { - c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient, + c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient, zap.Any("endpoints", addresses), - zap.String("error", err.Error()), + zap.Error(err), ) fatalOnErr(err) } c.onShutdown(func() { - c.log.Info(logs.FrostFSNodeClosingMorphComponents) + c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents) cli.Close() }) if err := cli.SetGroupSignerScope(); err != nil { - c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err)) + c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err)) } c.cfgMorph.client = cli - c.cfgMorph.notaryEnabled = cli.ProbeNotary() - - lookupScriptHashesInNNS(c) // smart contract auto negotiation - - if c.cfgMorph.notaryEnabled { - err = c.cfgMorph.client.EnableNotarySupport( - client.WithProxyContract( - c.cfgMorph.proxyScriptHash, - ), - ) - fatalOnErr(err) - } - - c.log.Info(logs.FrostFSNodeNotarySupport, - zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), - ) - - wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary()) - fatalOnErr(err) - - var netmapSource netmap.Source - - c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg) - - if c.cfgMorph.cacheTTL == 0 { - msPerBlock, err := c.cfgMorph.client.MsPerBlock() - fatalOnErr(err) - c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond - c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) - } - - if c.cfgMorph.cacheTTL < 0 { - netmapSource = wrap - } else { - // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) - } - - c.netMapSource = netmapSource - c.cfgNetmap.wrapper = wrap } func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { - // skip notary deposit in non-notary environments - if !c.cfgMorph.notaryEnabled { - return - } - - tx, err := makeNotaryDeposit(c) + tx, vub, err := makeNotaryDeposit(ctx, c) fatalOnErr(err) if tx.Equals(util.Uint256{}) { // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. 
- c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade) + c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade) return } - err = waitNotaryDeposit(ctx, c, tx) + err = waitNotaryDeposit(ctx, c, tx, vub) fatalOnErr(err) } -func makeNotaryDeposit(c *cfg) (util.Uint256, error) { +func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) { const ( // gasMultiplier defines how many times more the notary // balance must be compared to the GAS balance of the node: @@ -145,43 +146,19 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, error) { depositAmount, err := client.CalculateNotaryDepositAmount(c.cfgMorph.client, gasMultiplier, gasDivisor) if err != nil { - return util.Uint256{}, fmt.Errorf("could not calculate notary deposit: %w", err) + return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err) } - return c.cfgMorph.client.DepositEndlessNotary(depositAmount) + return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount) } -var ( - errNotaryDepositFail = errors.New("notary deposit tx has faulted") - errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network") -) - -func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error { - for i := 0; i < notaryDepositRetriesAmount; i++ { - c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted) - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - ok, err := c.cfgMorph.client.TxHalt(tx) - if err == nil { - if ok { - c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) - return nil - } - - return errNotaryDepositFail - } - - err = c.cfgMorph.client.Wait(ctx, 1) - if err != nil { - return fmt.Errorf("could not wait for one block in chain: %w", err) - } +func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { + if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil { + return err } - return errNotaryDepositTimeout + c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) + return nil } func listenMorphNotifications(ctx context.Context, c *cfg) { @@ -189,22 +166,23 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { err error subs subscriber.Subscriber ) + log := c.log.WithTag(logger.TagMorph) fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) } subs, err = subscriber.New(ctx, &subscriber.Params{ - Log: c.log, + Log: log, StartFromBlock: fromSideChainBlock, Client: c.cfgMorph.client, }) fatalOnErr(err) lis, err := event.NewListener(event.ListenerParams{ - Logger: c.log, + Logger: log, Subscriber: subs, }) fatalOnErr(err) @@ -222,7 +200,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain, + log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -232,12 +210,12 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers) 
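Editor's note: the deposit path above now returns a (tx hash, VUB) pair and delegates waiting to the client's WaitTxHalt, replacing the old fixed 300-block retry loop. Below is a minimal standalone sketch of that wait contract; the chainClient interface, its method names, and the [32]byte hash type are hypothetical simplifications, not the real morph client API.

package morphwait

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// chainClient models the two calls the wait loop needs; the real morph
// client wraps equivalent logic inside WaitTxHalt.
type chainClient interface {
	BlockCount() (uint32, error)        // current chain height
	TxHalted(tx [32]byte) (bool, error) // true once tx executed with HALT state
}

// waitTxHalt blocks until tx is persisted with HALT, the chain height
// passes vub (after which the transaction can no longer be accepted),
// or ctx is cancelled. A VUB bound makes the timeout exact instead of
// "N polling attempts".
func waitTxHalt(ctx context.Context, c chainClient, vub uint32, tx [32]byte) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		// Transient lookup errors are ignored here; the VUB check below
		// still bounds the loop.
		if ok, err := c.TxHalted(tx); err == nil && ok {
			return nil
		}
		h, err := c.BlockCount()
		if err != nil {
			return fmt.Errorf("get block count: %w", err)
		}
		if h > vub {
			return errors.New("transaction expired: chain height passed VUB")
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}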
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) - registerBlockHandler(lis, func(block *block.Block) { - c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) + registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { + log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState, + log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } @@ -248,27 +226,17 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse subs map[event.Type][]event.Handler, ) { for typ, handlers := range subs { - pi := event.NotificationParserInfo{} - pi.SetType(typ) - pi.SetScriptHash(scHash) - p, ok := parsers[typ] if !ok { panic(fmt.Sprintf("missing parser for event %s", typ)) } - pi.SetParser(p) - - lis.SetNotificationParser(pi) - - for _, h := range handlers { - hi := event.NotificationHandlerInfo{} - hi.SetType(typ) - hi.SetScriptHash(scHash) - hi.SetHandler(h) - - lis.RegisterNotificationHandler(hi) - } + lis.RegisterNotificationHandler(event.NotificationHandlerInfo{ + Contract: scHash, + Type: typ, + Parser: p, + Handlers: handlers, + }) } } @@ -297,10 +265,6 @@ func lookupScriptHashesInNNS(c *cfg) { ) for _, t := range targets { - if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled { - continue // ignore proxy contract if notary disabled - } - if emptyHash.Equals(*t.h) { *t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName) fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 128cc3005..7dfb4fe12 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -8,7 +8,7 @@ import ( "net" "sync/atomic" - netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -19,6 +19,7 @@ import ( netmapTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/netmap/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap" + netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "go.uber.org/zap" @@ -61,13 +62,15 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { if ni != nil { s.nodeInfo.Store(*ni) - switch { - case ni.IsOnline(): + switch ni.Status() { + case netmapSDK.Online: ctrlNetSt = control.NetmapStatus_ONLINE - case ni.IsOffline(): + case netmapSDK.Offline: ctrlNetSt = control.NetmapStatus_OFFLINE - case ni.IsMaintenance(): + case netmapSDK.Maintenance: ctrlNetSt = control.NetmapStatus_MAINTENANCE + case netmapSDK.UnspecifiedState: + ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED } } else { ctrlNetSt = control.NetmapStatus_OFFLINE @@ -78,7 +81,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { // nil ni means that the node is not included // in the netmap - niOld.SetOffline() + 
niOld.SetStatus(netmapSDK.Offline) s.nodeInfo.Store(niOld) } @@ -102,9 +105,7 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) { v := s.nodeInfo.Load() if v != nil { res, ok = v.(netmapSDK.NodeInfo) - if !ok { - panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v)) - } + assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v)) } return @@ -122,7 +123,11 @@ func nodeKeyFromNetmap(c *cfg) []byte { func (c *cfg) iterateNetworkAddresses(f func(string) bool) { ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - ni.IterateNetworkEndpoints(f) + for s := range ni.NetworkEndpoints() { + if f(s) { + return + } + } } } @@ -139,13 +144,11 @@ func initNetmapService(ctx context.Context, c *cfg) { network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo) c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes()) parseAttributes(c) - c.cfgNodeInfo.localInfo.SetOffline() + c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline) - if c.cfgMorph.client == nil { - initMorphComponents(ctx, c) - } + c.initMorphComponents(ctx) - initNetmapState(c) + initNetmapState(ctx, c) server := netmapTransportGRPC.New( netmapService.NewSignService( @@ -166,53 +169,52 @@ func initNetmapService(ctx context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { netmapGRPC.RegisterNetmapServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(netmapGRPC.NetmapService_ServiceDesc), server) }) addNewEpochNotificationHandlers(c) } func addNewEpochNotificationHandlers(c *cfg) { - addNewEpochNotificationHandler(c, func(ev event.Event) { + addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) { c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber()) }) - addNewEpochAsyncNotificationHandler(c, func(ev event.Event) { + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { e := ev.(netmapEvent.NewEpoch).EpochNumber() - c.updateContractNodeInfo(e) + c.updateContractNodeInfo(ctx, e) - if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 + if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } - if err := c.bootstrap(); err != nil { - c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) + if err := c.bootstrap(ctx); err != nil { + c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) } }) - if c.cfgMorph.notaryEnabled { - addNewEpochAsyncNotificationHandler(c, func(_ event.Event) { - _, err := makeNotaryDeposit(c) - if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit, - zap.String("error", err.Error()), - ) - } - }) - } + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { + _, _, err := makeNotaryDeposit(ctx, c) + if err != nil { + c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, + zap.Error(err), + ) + } + }) } // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. 
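Editor's note: iterateNetworkAddresses above switches from the callback-style IterateNetworkEndpoints to ranging over NetworkEndpoints(). A minimal sketch of that pattern using the standard iter package, assuming the SDK method yields an iter.Seq[string]; the nodeInfo type here is a stand-in, not the SDK type.

package endpoints

import "iter"

// nodeInfo stands in for the SDK type whose NetworkEndpoints is assumed
// to return a Go 1.23 range-over-func iterator instead of taking a visitor.
type nodeInfo struct{ addrs []string }

func (n nodeInfo) NetworkEndpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, a := range n.addrs {
			if !yield(a) { // consumer broke out of the range loop
				return
			}
		}
	}
}

// iterate stops early when f returns true, mirroring the replaced
// callback semantics: `return` inside the range body plays the role of
// the old `return true` from the visitor function.
func iterate(n nodeInfo, f func(string) bool) {
	for s := range n.NetworkEndpoints() {
		if f(s) {
			return
		}
	}
}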
-func bootstrapNode(c *cfg) { - if c.needBootstrap() { - if c.IsMaintenance() { - c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) - return - } - err := c.bootstrap() - fatalOnErrDetails("bootstrap error", err) +func bootstrapNode(ctx context.Context, c *cfg) { + if c.IsMaintenance() { + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + return } + err := c.bootstrap(ctx) + fatalOnErrDetails("bootstrap error", err) } func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) { @@ -237,46 +239,47 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser // initNetmapState inits current Network map state. // Must be called after Morph components initialization. -func initNetmapState(c *cfg) { - epoch, err := c.cfgNetmap.wrapper.Epoch() +func initNetmapState(ctx context.Context, c *cfg) { + epoch, err := c.cfgNetmap.wrapper.Epoch(ctx) fatalOnErrDetails("could not initialize current epoch number", err) var ni *netmapSDK.NodeInfo - ni, err = c.netmapInitLocalNodeState(epoch) + ni, err = c.netmapInitLocalNodeState(ctx, epoch) fatalOnErrDetails("could not init network state", err) stateWord := nodeState(ni) - c.log.Info(logs.FrostFSNodeInitialNetworkState, + c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState, zap.Uint64("epoch", epoch), zap.String("state", stateWord), ) - if ni != nil && ni.IsMaintenance() { + if ni != nil && ni.Status().IsMaintenance() { c.isMaintenance.Store(true) } c.cfgNetmap.state.setCurrentEpoch(epoch) - c.cfgNetmap.startEpoch = epoch c.setContractNodeInfo(ni) } func nodeState(ni *netmapSDK.NodeInfo) string { if ni != nil { - switch { - case ni.IsOnline(): + switch ni.Status() { + case netmapSDK.Online: return "online" - case ni.IsOffline(): + case netmapSDK.Offline: return "offline" - case ni.IsMaintenance(): + case netmapSDK.Maintenance: return "maintenance" + case netmapSDK.UnspecifiedState: + return "undefined" } } return "undefined" } -func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) { - nmNodes, err := c.cfgNetmap.wrapper.GetCandidates() +func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { + nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx) if err != nil { return nil, err } @@ -289,7 +292,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error } } - node, err := c.netmapLocalNodeState(epoch) + node, err := c.netmapLocalNodeState(ctx, epoch) if err != nil { return nil, err } @@ -303,16 +306,16 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error if nmState != candidateState { // This happens when the node was switched to maintenance without epoch tick. // We expect it to continue staying in maintenance. 
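Editor's note: notification handlers in this patch gain a context parameter (func(ctx, ev) instead of func(ev)), so epoch handlers can log and issue RPCs with cancellation and tracing. A small sketch of a registry built around the new signature; Event, Handler, and registry are illustrative types, not the event package's real ones.

package events

import "context"

// Event models a parsed chain notification; Handler now receives the
// dispatcher's context so nested calls are cancellable and traceable.
type (
	Event   interface{ Kind() string }
	Handler func(ctx context.Context, ev Event)
)

type registry struct{ handlers map[string][]Handler }

func (r *registry) register(kind string, h Handler) {
	if r.handlers == nil {
		r.handlers = make(map[string][]Handler)
	}
	r.handlers[kind] = append(r.handlers[kind], h)
}

// dispatch fans an event out to its handlers under one context.
func (r *registry) dispatch(ctx context.Context, ev Event) {
	for _, h := range r.handlers[ev.Kind()] {
		h(ctx, ev)
	}
}

// adapt lifts a legacy context-free handler into the new signature,
// the one-line migration used for handlers that ignore the context.
func adapt(old func(Event)) Handler {
	return func(_ context.Context, ev Event) { old(ev) }
}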
- c.log.Info(logs.CandidateStatusPriority, + c.log.Info(ctx, logs.CandidateStatusPriority, zap.String("netmap", nmState), zap.String("candidate", candidateState)) } return candidate, nil } -func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) { +func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { // calculate current network state - nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch) + nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch) if err != nil { return nil, err } @@ -347,35 +350,29 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { ) } -var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") - -func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error { +func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: return fmt.Errorf("unsupported status %v", st) case control.NetmapStatus_MAINTENANCE: - return c.setMaintenanceStatus(false) + return c.setMaintenanceStatus(ctx, false) case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE: } - c.stopMaintenance() - - if !c.needBootstrap() { - return errRelayBootstrap - } + c.stopMaintenance(ctx) if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) - return bootstrapOnline(c) + return bootstrapOnline(ctx, c) } c.cfgNetmap.reBoostrapTurnedOff.Store(true) - return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {}) + return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) } -func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { - epoch, err := c.netMapSource.Epoch() +func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) { + epoch, err := c.netMapSource.Epoch(ctx) if err != nil { return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err) } @@ -383,12 +380,12 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { return st, epoch, nil } -func (c *cfg) ForceMaintenance() error { - return c.setMaintenanceStatus(true) +func (c *cfg) ForceMaintenance(ctx context.Context) error { + return c.setMaintenanceStatus(ctx, true) } -func (c *cfg) setMaintenanceStatus(force bool) error { - netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration() +func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { + netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx) if err != nil { err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) } else if !netSettings.MaintenanceModeAllowed { @@ -396,10 +393,10 @@ func (c *cfg) setMaintenanceStatus(force bool) error { } if err == nil || force { - c.startMaintenance() + c.startMaintenance(ctx) if err == nil { - err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance) + err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance) } if err != nil { @@ -412,13 +409,16 @@ func (c *cfg) setMaintenanceStatus(force bool) error { // calls UpdatePeerState operation of Netmap contract's client for the local node. // State setter is used to specify node state to switch to. 
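Editor's note: setMaintenanceStatus above gates the switch on a network-wide setting but lets force override it. A simplified standalone model of that gating; settingsReader and checkMaintenance are hypothetical names, and the real code additionally flips local state and updates the netmap contract between these steps.

package maintenance

import (
	"context"
	"errors"
	"fmt"
)

// settingsReader abstracts the netmap contract read performed by
// ReadNetworkConfiguration in the real client.
type settingsReader interface {
	MaintenanceModeAllowed(ctx context.Context) (bool, error)
}

var errMaintenanceForbidden = errors.New("maintenance mode is not allowed by the network")

// checkMaintenance mirrors the gating logic: a read error or a network
// prohibition blocks the switch unless force is set; with force, the
// node proceeds anyway and the error (if any) is still surfaced.
func checkMaintenance(ctx context.Context, r settingsReader, force bool) error {
	allowed, err := r.MaintenanceModeAllowed(ctx)
	if err != nil {
		err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
	} else if !allowed {
		err = errMaintenanceForbidden
	}
	if err != nil && !force {
		return err // not forced: refuse to enter maintenance
	}
	// forced or allowed: the real code switches local state here,
	// then updates the peer state in the netmap contract.
	return err
}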
-func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error { +func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error { var prm nmClient.UpdatePeerPrm prm.SetKey(c.key.PublicKey().Bytes()) stateSetter(&prm) - _, err := c.cfgNetmap.wrapper.UpdatePeerState(prm) - return err + res, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm) + if err != nil { + return err + } + return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash) } type netInfo struct { @@ -433,7 +433,7 @@ type netInfo struct { msPerBlockRdr func() (int64, error) } -func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { +func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) { magic, err := n.magic.MagicNumber() if err != nil { return nil, err @@ -443,7 +443,7 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { ni.SetCurrentEpoch(n.netState.CurrentEpoch()) ni.SetMagicNumber(magic) - netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration() + netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx) if err != nil { return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err) } diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go new file mode 100644 index 000000000..e6be9cdf5 --- /dev/null +++ b/cmd/frostfs-node/netmap_source.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +type rawNetmapSource struct { + client *netmapClient.Client +} + +func newRawNetmapStorage(client *netmapClient.Client) netmap.Source { + return &rawNetmapSource{ + client: client, + } +} + +func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMap(ctx, diff) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMapByEpoch(ctx, epoch) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) { + return s.client.Epoch(ctx) +} diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 0124bf772..c33c02b3f 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -2,12 +2,9 @@ package main import ( "context" - "errors" "fmt" "net" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics" policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" @@ -16,18 +13,16 
@@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" deletesvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete/v2" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" getsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get/v2" + patchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/patch" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" putsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put/v2" searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" @@ -36,8 +31,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -54,23 +50,29 @@ type objectSvc struct { get *getsvcV2.Service delete *deletesvcV2.Service + + patch *patchsvc.Service } -func (c *cfg) MaxObjectSize() uint64 { - sz, err := c.cfgNetmap.wrapper.MaxObjectSize() +func (c *cfg) MaxObjectSize(ctx context.Context) uint64 { + sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx) if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, - zap.String("error", err.Error()), + c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, + zap.Error(err), ) } return sz } -func (s *objectSvc) Put() (objectService.PutObjectStream, error) { +func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) { return s.put.Put() } +func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) { + return s.patch.Patch() +} + func (s *objectSvc) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { return s.put.PutSingle(ctx, req) } @@ -101,16 +103,15 @@ func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRe type delNetInfo struct { netmap.State - tsLifetime uint64 cfg *cfg } func (i *delNetInfo) TombstoneLifetime() 
(uint64, error) { - return i.tsLifetime, nil + return i.cfg.cfgObject.tombstoneLifetime.Load(), nil } -// returns node owner ID calculated from configured private key. +// LocalNodeID returns node owner ID calculated from configured private key. // // Implements method needed for Object.Delete service. func (i *delNetInfo) LocalNodeID() user.ID { @@ -121,8 +122,8 @@ type innerRingFetcherWithNotary struct { sidechain *morphClient.Client } -func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) { - keys, err := fn.sidechain.NeoFSAlphabetList() +func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) { + keys, err := fn.sidechain.NeoFSAlphabetList(ctx) if err != nil { return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err) } @@ -135,24 +136,6 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) { return result, nil } -type innerRingFetcherWithoutNotary struct { - nm *nmClient.Client -} - -func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) { - keys, err := f.nm.GetInnerRingList() - if err != nil { - return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err) - } - - result := make([][]byte, 0, len(keys)) - for i := range keys { - result = append(result, keys[i].Bytes()) - } - - return result, nil -} - func initObjectService(c *cfg) { keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state) @@ -167,11 +150,12 @@ func initObjectService(c *cfg) { sPutV2 := createPutSvcV2(sPut, keyStorage) - sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache) + sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource) sSearchV2 := createSearchSvcV2(sSearch, keyStorage) - sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource) + sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource, + c.ObjectCfg.priorityMetrics) *c.cfgObject.getSvc = *sGet // need smth better @@ -181,17 +165,17 @@ func initObjectService(c *cfg) { sDeleteV2 := createDeleteServiceV2(sDelete) + sPatch := createPatchSvc(sGet, sPut) + // build service pipeline - // grpc | audit | | signature | response | acl | ape | split + // grpc | audit | qos | | signature | response | acl | ape | split - splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2) + splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) - apeSvc := createAPEService(c, splitSvc) - - aclSvc := createACLServiceV2(c, apeSvc, &irFetcher) + apeSvc := createAPEService(c, &irFetcher, splitSvc) var commonSvc objectService.Common - commonSvc.Init(&c.internals, aclSvc) + commonSvc.Init(&c.internals, apeSvc) respSvc := objectService.NewResponseService( &commonSvc, @@ -203,19 +187,23 @@ func initObjectService(c *cfg) { respSvc, ) - c.shared.metricsSvc = objectService.NewMetricCollector( + c.metricsSvc = objectService.NewMetricCollector( signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit) + qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService) + auditSvc := objectService.NewAuditService(qosService, c.log, c.audit) server := objectTransportGRPC.New(auditSvc) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { objectGRPC.RegisterObjectServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service 
support. + s.RegisterService(frostFSServiceDesc(objectGRPC.ObjectService_ServiceDesc), server) }) } func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) { if policerconfig.UnsafeDisable(c.appCfg) { - c.log.Warn(logs.FrostFSNodePolicerIsDisabled) + c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled) return } @@ -226,14 +214,12 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl prm.MarkAsGarbage(addr) prm.WithForceRemoval() - _, err := ls.Inhume(ctx, prm) - return err + return ls.Inhume(ctx, prm) } remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) - pol := policer.New( - policer.WithLogger(c.log), + policer.WithLogger(c.log.WithTag(logger.TagPolicer)), policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}), policer.WithBuryFunc(buryFn), policer.WithContainerSource(c.cfgObject.cnrSource), @@ -277,10 +263,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl var inhumePrm engine.InhumePrm inhumePrm.MarkAsGarbage(addr) - _, err := ls.Inhume(ctx, inhumePrm) - if err != nil { - c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, - zap.String("error", err.Error()), + if err := ls.Inhume(ctx, inhumePrm); err != nil { + c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, + zap.Error(err), ) } }), @@ -296,14 +281,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl }) } -func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { - if c.cfgMorph.client.ProbeNotary() { - return &innerRingFetcherWithNotary{ - sidechain: c.cfgMorph.client, - } - } - return &innerRingFetcherWithoutNotary{ - nm: c.cfgNetmap.wrapper, +func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { + return &innerRingFetcherWithNotary{ + sidechain: c.cfgMorph.client, } } @@ -311,13 +291,13 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa ls := c.cfgObject.cfgLocalStorage.localStorage return replicator.New( - replicator.WithLogger(c.log), + replicator.WithLogger(c.log.WithTag(logger.TagReplicator)), replicator.WithPutTimeout( replicatorconfig.PutTimeout(c.appCfg), ), replicator.WithLocalStorage(ls), replicator.WithRemoteSender( - putsvc.NewRemoteSender(keyStorage, cache), + objectwriter.NewRemoteSender(keyStorage, cache), ), replicator.WithRemoteGetter( getsvc.NewRemoteGetter(c.clientCache, c.netMapSource, keyStorage), @@ -329,7 +309,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetcher) *putsvc.Service { ls := c.cfgObject.cfgLocalStorage.localStorage - var os putsvc.ObjectStorage = engineWithoutNotifications{ + var os objectwriter.ObjectStorage = engineWithoutNotifications{ engine: ls, } @@ -343,9 +323,8 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche c, c.cfgNetmap.state, irFetcher, - putsvc.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal), - putsvc.WithLogger(c.log), - putsvc.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification), + objectwriter.WithLogger(c.log), + objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification), ) } @@ -353,7 +332,11 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2 return putsvcV2.NewService(sPut, keyStorage) } -func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen 
*util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service { +func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Service { + return patchsvc.NewService(sPut.Config, sGet) +} + +func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service { ls := c.cfgObject.cfgLocalStorage.localStorage return searchsvc.New( @@ -364,7 +347,8 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav ), c.netMapSource, keyStorage, - searchsvc.WithLogger(c.log), + containerSource, + searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)), ) } @@ -375,6 +359,7 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage) func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source, + priorityMetrics []placement.Metric, ) *getsvc.Service { ls := c.cfgObject.cfgLocalStorage.localStorage @@ -384,10 +369,12 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra ls, traverseGen.WithTraverseOptions( placement.SuccessAfter(1), + placement.WithPriorityMetrics(priorityMetrics), + placement.WithNodeState(c), ), coreConstructor, containerSource, - getsvc.WithLogger(c.log)) + getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc))) } func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { @@ -398,7 +385,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag c.netMapSource, c, c.cfgObject.cnrSource, - getsvcV2.WithLogger(c.log), + getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)), ) } @@ -410,13 +397,12 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi sSearch, sPut, &delNetInfo{ - State: c.cfgNetmap.state, - tsLifetime: c.cfgObject.tombstoneLifetime, + State: c.cfgNetmap.state, cfg: c, }, keyStorage, - deletesvc.WithLogger(c.log), + deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)), ) } @@ -425,7 +411,7 @@ func createDeleteServiceV2(sDelete *deletesvc.Service) *deletesvcV2.Service { } func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Service, - sSearchV2 *searchsvcV2.Service, sDeleteV2 *deletesvcV2.Service, + sSearchV2 *searchsvcV2.Service, sDeleteV2 *deletesvcV2.Service, sPatch *patchsvc.Service, ) *objectService.TransportSplitter { return objectService.NewTransportSplitter( c.cfgGRPC.maxChunkSize, @@ -435,67 +421,28 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi search: sSearchV2, get: sGetV2, delete: sDeleteV2, + patch: sPatch, }, ) } -func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service { - ls := c.cfgObject.cfgLocalStorage.localStorage - - return v2.New( - apeSvc, - c.netMapSource, - irFetcher, - acl.NewChecker( - c.cfgNetmap.state, - c.cfgObject.eaclSource, - eaclSDK.NewValidator(), - ls), - c.cfgObject.cnrSource, - v2.WithLogger(c.log), - ) -} - -func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service { +func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( - c.log, objectAPE.NewChecker( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), 
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc), - c.shared.frostfsidClient, + c.frostfsidClient, c.netMapSource, c.cfgNetmap.state, c.cfgObject.cnrSource, c.binPublicKey, ), + objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource), splitSvc, ) } -type morphEACLFetcher struct { - w *cntClient.Client -} - -func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) { - eaclInfo, err := s.w.GetEACL(cnr) - if err != nil { - return nil, err - } - - binTable, err := eaclInfo.Value.Marshal() - if err != nil { - return nil, fmt.Errorf("marshal eACL table: %w", err) - } - - if !eaclInfo.Signature.Verify(binTable) { - // TODO(@cthulhu-rider): #468 use "const" error - return nil, errors.New("invalid signature of the eACL table") - } - - return eaclInfo, nil -} - type engineWithoutNotifications struct { engine *engine.StorageEngine } @@ -515,14 +462,13 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad prm.WithTarget(tombstone, addrs...) - _, err := e.engine.Inhume(ctx, prm) - return err + return e.engine.Inhume(ctx, prm) } func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock) } -func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error { - return engine.Put(ctx, e.engine, o) +func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error { + return engine.Put(ctx, e.engine, o, indexedContainer) } diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go index 22fda2b4c..55f76cc76 100644 --- a/cmd/frostfs-node/policy_engine.go +++ b/cmd/frostfs-node/policy_engine.go @@ -21,7 +21,9 @@ type accessPolicyEngine struct { var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil) type morphAPEChainCacheKey struct { - name chain.Name + // nolint:unused + name chain.Name + // nolint:unused target engine.Target } diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index dcd320146..e4da8119f 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -1,49 +1,50 @@ package main import ( + "context" "runtime" profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler" httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" ) -func initProfilerService(c *cfg) { +func initProfilerService(ctx context.Context, c *cfg) { tuneProfilers(c) pprof, _ := pprofComponent(c) - pprof.init(c) + pprof.init(ctx, c) } func pprofComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.pprof == nil { - c.dynamicConfiguration.pprof = new(httpComponent) - c.dynamicConfiguration.pprof.cfg = c - c.dynamicConfiguration.pprof.name = "pprof" - c.dynamicConfiguration.pprof.handler = httputil.Handler() - c.dynamicConfiguration.pprof.preReload = tuneProfilers + if c.pprof == nil { + c.pprof = new(httpComponent) + c.pprof.cfg = c + c.pprof.name = "pprof" + c.pprof.handler = httputil.Handler() + c.pprof.preReload = tuneProfilers updated = true } // (re)init read configuration enabled := profilerconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.pprof.enabled { - c.dynamicConfiguration.pprof.enabled = enabled + if enabled != c.pprof.enabled { 
+ c.pprof.enabled = enabled updated = true } address := profilerconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.pprof.address { - c.dynamicConfiguration.pprof.address = address + if address != c.pprof.address { + c.pprof.address = address updated = true } dur := profilerconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.pprof.shutdownDur { - c.dynamicConfiguration.pprof.shutdownDur = dur + if dur != c.pprof.shutdownDur { + c.pprof.shutdownDur = dur updated = true } - return c.dynamicConfiguration.pprof, updated + return c.pprof, updated } func tuneProfilers(c *cfg) { diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go new file mode 100644 index 000000000..6394b668b --- /dev/null +++ b/cmd/frostfs-node/qos.go @@ -0,0 +1,108 @@ +package main + +import ( + "bytes" + "context" + + qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "go.uber.org/zap" +) + +type cfgQoSService struct { + netmapSource netmap.Source + logger *logger.Logger + allowedCriticalPubs [][]byte + allowedInternalPubs [][]byte +} + +func initQoSService(c *cfg) { + criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg) + internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg) + rawCriticalPubs := make([][]byte, 0, len(criticalPubs)) + rawInternalPubs := make([][]byte, 0, len(internalPubs)) + for i := range criticalPubs { + rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes()) + } + for i := range internalPubs { + rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes()) + } + + c.cfgQoSService = cfgQoSService{ + netmapSource: c.netMapSource, + logger: c.log, + allowedCriticalPubs: rawCriticalPubs, + allowedInternalPubs: rawInternalPubs, + } +} + +func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { + rawTag, defined := qosTagging.IOTagFromContext(ctx) + if !defined { + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String()) + } + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + ioTag, err := qos.FromRawString(rawTag) + if err != nil { + s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + + switch ioTag { + case qos.IOTagClient: + return ctx + case qos.IOTagCritical: + for _, pk := range s.allowedCriticalPubs { + if bytes.Equal(pk, requestSignPublicKey) { + return ctx + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), requestSignPublicKey) { + return ctx + } + } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + case qos.IOTagInternal: + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return ctx + } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + default: + s.logger.Debug(ctx, 
logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } +} + +func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool { + for _, pk := range s.allowedInternalPubs { + if bytes.Equal(pk, publicKey) { + return true + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return false + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), publicKey) { + return true + } + } + + return false +} diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go new file mode 100644 index 000000000..971f9eebf --- /dev/null +++ b/cmd/frostfs-node/qos_test.go @@ -0,0 +1,226 @@ +package main + +import ( + "context" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestQoSService_Client(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag client defined", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, 
signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func TestQoSService_Internal(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) +} + +func TestQoSService_Critical(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) +} + +func TestQoSService_NetmapGetError(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + s.netmapSource = &utilTesting.TestNetmapSource{} + 
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) { + nmSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + reqSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedCritSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedIntSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + var node netmap.NodeInfo + node.SetPublicKey(nmSigner.PublicKey().Bytes()) + nm := &netmap.NetMap{} + nm.SetEpoch(100) + nm.SetNodes([]netmap.NodeInfo{node}) + + return &cfgQoSService{ + logger: test.NewLogger(t), + netmapSource: &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ + 100: nm, + }, + CurrentEpoch: 100, + }, + allowedCriticalPubs: [][]byte{ + allowedCritSigner.PublicKey().Bytes(), + }, + allowedInternalPubs: [][]byte{ + allowedIntSigner.PublicKey().Bytes(), + }, + }, + &testQoSServicePublicKeys{ + NetmapNode: nmSigner.PublicKey().Bytes(), + Request: reqSigner.PublicKey().Bytes(), + Internal: allowedIntSigner.PublicKey().Bytes(), + Critical: allowedCritSigner.PublicKey().Bytes(), + } +} + +type testQoSServicePublicKeys struct { + NetmapNode []byte + Request []byte + Internal []byte + Critical []byte +} diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go index d858ba490..f6d398574 100644 --- a/cmd/frostfs-node/runtime.go +++ b/cmd/frostfs-node/runtime.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "runtime/debug" @@ -9,17 +10,17 @@ import ( "go.uber.org/zap" ) -func setRuntimeParameters(c *cfg) { +func setRuntimeParameters(ctx context.Context, c *cfg) { if len(os.Getenv("GOMEMLIMIT")) != 0 { // default limit < yaml limit < app env limit < GOMEMLIMIT - c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) + c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) return } memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg) previous := debug.SetMemoryLimit(memLimitBytes) if memLimitBytes != previous { - c.log.Info(logs.RuntimeSoftMemoryLimitUpdated, + c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated, zap.Int64("new_value", memLimitBytes), zap.Int64("old_value", previous)) } diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index ee21ec230..fbfe3f5e6 100644 --- a/cmd/frostfs-node/session.go +++ 
b/cmd/frostfs-node/session.go @@ -6,8 +6,6 @@ import ( "net" "time" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -16,6 +14,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "google.golang.org/grpc" ) @@ -48,18 +49,21 @@ func initSessionService(c *cfg) { _ = c.privateTokenStore.Close() }) - addNewEpochNotificationHandler(c, func(ev event.Event) { + addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) { c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber()) }) server := sessionTransportGRPC.New( sessionSvc.NewSignService( &c.key.PrivateKey, - sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log), + sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)), ), ) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { sessionGRPC.RegisterSessionServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(sessionGRPC.SessionService_ServiceDesc), server) }) } diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go index 312adfb8d..65f5aec15 100644 --- a/cmd/frostfs-node/tracing.go +++ b/cmd/frostfs-node/tracing.go @@ -11,21 +11,25 @@ import ( ) func initTracing(ctx context.Context, c *cfg) { - conf := tracingconfig.ToTracingConfig(c.appCfg) - - _, err := tracing.Setup(ctx, *conf) + conf, err := tracingconfig.ToTracingConfig(c.appCfg) if err != nil { - c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err)) + return + } + _, err = tracing.Setup(ctx, *conf) + if err != nil { + c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err)) + return } c.closers = append(c.closers, closer{ name: "tracing", fn: func() { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Second*5) defer cancel() err := tracing.Shutdown(ctx) // cfg context cancels before close if err != nil { - c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err)) } }, }) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index d22e510de..62af45389 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" "google.golang.org/grpc" @@ 
-29,49 +30,50 @@ type cnrSource struct { cli *containerClient.Client } -func (c cnrSource) Get(id cid.ID) (*container.Container, error) { - return c.src.Get(id) +func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) { + return c.src.Get(ctx, id) } -func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) { - return c.src.DeletionInfo(cid) +func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) { + return c.src.DeletionInfo(ctx, cid) } -func (c cnrSource) List() ([]cid.ID, error) { - return c.cli.ContainersOf(nil) +func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) { + return c.cli.ContainersOf(ctx, nil) } func initTreeService(c *cfg) { treeConfig := treeconfig.Tree(c.appCfg) if !treeConfig.Enabled() { - c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization) + c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization) return } c.treeService = tree.New( tree.WithContainerSource(cnrSource{ src: c.cfgObject.cnrSource, - cli: c.shared.cnrClient, + cli: c.cnrClient, }), - tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient), - tree.WithEACLSource(c.cfgObject.eaclSource), + tree.WithFrostfsidSubjectProvider(c.frostfsidClient), tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), - tree.WithLogger(c.log), + tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)), tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage), tree.WithContainerCacheSize(treeConfig.CacheSize()), tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), tree.WithReplicationChannelCapacity(treeConfig.ReplicationChannelCapacity()), tree.WithReplicationWorkerCount(treeConfig.ReplicationWorkerCount()), + tree.WithSyncBatchSize(treeConfig.SyncBatchSize()), tree.WithAuthorizedKeys(treeConfig.AuthorizedKeys()), tree.WithMetrics(c.metricsCollector.TreeService()), tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()), tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()), tree.WithNetmapState(c.cfgNetmap.state), + tree.WithDialerSource(c.dialerSource), ) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - tree.RegisterTreeServiceServer(s, c.treeService) + tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) }) c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { @@ -79,10 +81,10 @@ func initTreeService(c *cfg) { })) if d := treeConfig.SyncInterval(); d == 0 { - addNewEpochNotificationHandler(c, func(_ event.Event) { + addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) } }) } else { @@ -93,7 +95,7 @@ func initTreeService(c *cfg) { for range tick.C { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) if errors.Is(err, tree.ErrShuttingDown) { return } @@ -102,17 +104,17 @@ func initTreeService(c *cfg) { }() } - subscribeToContainerRemoval(c, func(e event.Event) { + subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) { ev := 
e.(containerEvent.DeleteSuccess) // This is executed asynchronously, so we don't care about the operation taking some time. - c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) - err := c.treeService.DropTree(context.Background(), ev.ID, "") + c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) + err := c.treeService.DropTree(ctx, ev.ID, "") if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. - c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, + c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, zap.Stringer("cid", ev.ID), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index ae52b9e4a..22d2e0aa9 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -30,6 +30,11 @@ func validateConfig(c *config.Config) error { return fmt.Errorf("invalid logger destination: %w", err) } + err = loggerPrm.SetTags(loggerconfig.Tags(c)) + if err != nil { + return fmt.Errorf("invalid list of allowed tags: %w", err) + } + // shard configuration validation shardNum := 0 diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go index d9c0f167f..495365cf0 100644 --- a/cmd/frostfs-node/validate_test.go +++ b/cmd/frostfs-node/validate_test.go @@ -1,7 +1,6 @@ package main import ( - "os" "path/filepath" "testing" @@ -22,17 +21,4 @@ func TestValidate(t *testing.T) { require.NoError(t, err) }) }) - - t.Run("mainnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) - t.Run("testnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "testnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) } diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go new file mode 100644 index 000000000..e5a35ab71 --- /dev/null +++ b/cmd/internal/common/ape/commands.go @@ -0,0 +1,167 @@ +package ape + +import ( + "encoding/hex" + "errors" + "fmt" + "strconv" + "strings" + + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" + "github.com/nspcc-dev/neo-go/cli/input" + "github.com/spf13/cobra" +) + +const ( + defaultNamespace = "root" + namespaceTarget = "namespace" + containerTarget = "container" + userTarget = "user" + groupTarget = "group" + + Ingress = "ingress" + S3 = "s3" +) + +var mChainName = map[string]apechain.Name{ + Ingress: apechain.Ingress, + S3: apechain.S3, +} + +var ( + errSettingDefaultValueWasDeclined = errors.New("setting default value was declined") + errUnknownTargetType = errors.New("unknown target type") + errUnsupportedChainName = errors.New("unsupported chain name") +) + +// PrintHumanReadableAPEChain prints APE chain rules.
+func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) { + cmd.Println("Chain ID: " + string(chain.ID)) + cmd.Printf(" HEX: %x\n", chain.ID) + cmd.Println("Rules:") + for _, rule := range chain.Rules { + cmd.Println("\n\tStatus: " + rule.Status.String()) + cmd.Println("\tAny: " + strconv.FormatBool(rule.Any)) + cmd.Println("\tConditions:") + for _, c := range rule.Condition { + var ot string + switch c.Kind { + case apechain.KindResource: + ot = "Resource" + case apechain.KindRequest: + ot = "Request" + default: + panic("unknown object type") + } + cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value)) + } + cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted)) + for _, name := range rule.Actions.Names { + cmd.Println("\t\t" + name) + } + cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted)) + for _, name := range rule.Resources.Names { + cmd.Println("\t\t" + name) + } + } +} + +// ParseTarget handles target parsing of an APE chain. +func ParseTarget(cmd *cobra.Command) engine.Target { + typ := ParseTargetType(cmd) + name, _ := cmd.Flags().GetString(TargetNameFlag) + switch typ { + case engine.Namespace: + if name == "" { + ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace)) + commonCmd.ExitOnErr(cmd, "read line error: %w", err) + ln = strings.ToLower(ln) + if len(ln) > 0 && (ln[0] == 'n') { + commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined) + } + name = defaultNamespace + } + return engine.NamespaceTarget(name) + case engine.Container: + var cnr cid.ID + commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) + return engine.ContainerTarget(name) + case engine.User: + return engine.UserTarget(name) + case engine.Group: + return engine.GroupTarget(name) + default: + commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) + } + panic("unreachable") +} + +// ParseTargetType handles target type parsing of an APE chain. +func ParseTargetType(cmd *cobra.Command) engine.TargetType { + typ, _ := cmd.Flags().GetString(TargetTypeFlag) + switch typ { + case namespaceTarget: + return engine.Namespace + case containerTarget: + return engine.Container + case userTarget: + return engine.User + case groupTarget: + return engine.Group + default: + commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType) + } + panic("unreachable") +} + +// ParseChainID handles the parsing of APE-chain identifier. +// For some subcommands, chain ID is optional as an input parameter and should be generated by +// the service instead. +func ParseChainID(cmd *cobra.Command) (id apechain.ID) { + chainID, _ := cmd.Flags().GetString(ChainIDFlag) + id = apechain.ID(chainID) + + hexEncoded, _ := cmd.Flags().GetBool(ChainIDHexFlag) + if !hexEncoded { + return + } + + chainIDRaw, err := hex.DecodeString(chainID) + commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) + id = apechain.ID(chainIDRaw) + return +} + +// ParseChain parses an APE chain which can be provided either as a rule statement +// or loaded from a binary/JSON file path. 
+func ParseChain(cmd *cobra.Command) *apechain.Chain { + chain := new(apechain.Chain) + chain.ID = ParseChainID(cmd) + + if rules, _ := cmd.Flags().GetStringArray(RuleFlag); len(rules) > 0 { + commonCmd.ExitOnErr(cmd, "parser error: %w", apeutil.ParseAPEChain(chain, rules)) + } else if encPath, _ := cmd.Flags().GetString(PathFlag); encPath != "" { + commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", apeutil.ParseAPEChainBinaryOrJSON(chain, encPath)) + } else { + commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed")) + } + + cmd.Println("Parsed chain:") + PrintHumanReadableAPEChain(cmd, chain) + + return chain +} + +// ParseChainName parses chain name: the place in the request lifecycle where policy is applied. +func ParseChainName(cmd *cobra.Command) apechain.Name { + chainName, _ := cmd.Flags().GetString(ChainNameFlag) + apeChainName, ok := mChainName[strings.ToLower(chainName)] + if !ok { + commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName) + } + return apeChainName +} diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go new file mode 100644 index 000000000..d8b2e88a2 --- /dev/null +++ b/cmd/internal/common/ape/flags.go @@ -0,0 +1,79 @@ +package ape + +const ( + RuleFlag = "rule" + PathFlag = "path" + PathFlagDesc = "Path to encoded chain in JSON or binary format" + TargetNameFlag = "target-name" + TargetNameFlagDesc = "Resource name in APE resource name format" + TargetTypeFlag = "target-type" + TargetTypeFlagDesc = "Resource type(container/namespace)" + ChainIDFlag = "chain-id" + ChainIDFlagDesc = "Chain id" + ChainIDHexFlag = "chain-id-hex" + ChainIDHexFlagDesc = "Flag to parse chain ID as hex" + ChainNameFlag = "chain-name" + ChainNameFlagDesc = "Chain name(ingress|s3)" + AllFlag = "all" +) + +const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format: + [:status_detail] ... ... ... + +Status: + - allow Permits specified actions + - deny Prohibits specified actions + - deny:QuotaLimitReached Denies access due to quota limits + +Actions: + Object operations: + - Object.Put, Object.Get, etc. + - Object.* (all object operations) + Container operations: + - Container.Put, Container.Get, etc. 
+ - Container.* (all container operations) + +Conditions: + ResourceCondition: + Format: ResourceCondition:"key"=value, "key"!=value + Reserved properties (use '\' before '$'): + - $Object:version + - $Object:objectID + - $Object:containerID + - $Object:ownerID + - $Object:creationEpoch + - $Object:payloadLength + - $Object:payloadHash + - $Object:objectType + - $Object:homomorphicHash + +RequestCondition: + Format: RequestCondition:"key"=value, "key"!=value + Reserved properties (use '\' before '$'): + - $Actor:publicKey + - $Actor:role + + Example: + ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others + +Resources: + For objects: + - namespace/cid/oid (specific object) + - namespace/cid/* (all objects in container) + - namespace/* (all objects in namespace) + - * (all objects) + - /* (all objects in root namespace) + - /cid/* (all objects in root container) + - /cid/oid (specific object in root container) + + For containers: + - namespace/cid (specific container) + - namespace/* (all containers in namespace) + - * (all containers) + - /cid (root container) + - /* (all root containers) + +Notes: + - Cannot mix object and container operations in one rule + - Default behavior is Any=false unless 'any' is specified + - Use 'all' keyword to explicitly set Any=false` diff --git a/cmd/internal/common/config/viper_test.go b/cmd/internal/common/config/viper_test.go new file mode 100644 index 000000000..d533a15c2 --- /dev/null +++ b/cmd/internal/common/config/viper_test.go @@ -0,0 +1,107 @@ +package config_test + +import ( + "encoding/json" + "os" + "path" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config/test" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +func TestCreateReloadViper(t *testing.T) { + type m = map[string]any + + dummyFileSize := 1 << 10 + + configPath := t.TempDir() + configFile := "000_a.yaml" + + configDirPath := path.Join(configPath, "conf.d") + require.NoError(t, os.Mkdir(configDirPath, 0o700)) + + configtest.PrepareConfigFiles(t, configPath, []configtest.ConfigFile{ + configtest.NewConfigFile(configFile, m{"a": "000"}, yaml.Marshal), + }) + + // Not valid configs, dummy files that appear lexicographically first. + configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{ + configtest.NewDummyFile("000_file_1", dummyFileSize), + configtest.NewDummyFile("000_file_2", dummyFileSize), + configtest.NewDummyFile("000_file_3", dummyFileSize), + }) + + configtest.PrepareConfigFiles(t, configDirPath, []configtest.ConfigFile{ + // Valid configs with invalid extensions that appear lexicographically first. + configtest.NewConfigFile("001_a.yaml.un~", m{"a": "101"}, yaml.Marshal), + configtest.NewConfigFile("001_b.yml~", m{"b": m{"a": "102", "b": "103"}}, yaml.Marshal), + configtest.NewConfigFile("001_c.yaml.swp", m{"c": m{"a": "104", "b": "105"}}, yaml.Marshal), + configtest.NewConfigFile("001_d.json.swp", m{"d": m{"a": "106", "b": "107"}}, json.Marshal), + + // Valid configs with valid extensions that should be loaded. + configtest.NewConfigFile("010_a.yaml", m{"a": "1"}, yaml.Marshal), + configtest.NewConfigFile("020_b.yml", m{"b": m{"a": "2", "b": "3"}}, yaml.Marshal), + configtest.NewConfigFile("030_c.json", m{"c": m{"a": "4", "b": "5"}}, json.Marshal), + + // Valid configs with invalid extensions that appear lexicographically last.
+ configtest.NewConfigFile("099_a.yaml.un~", m{"a": "201"}, yaml.Marshal), + configtest.NewConfigFile("099_b.yml~", m{"b": m{"a": "202", "b": "203"}}, yaml.Marshal), + configtest.NewConfigFile("099_c.yaml.swp", m{"c": m{"a": "204", "b": "205"}}, yaml.Marshal), + configtest.NewConfigFile("099_c.json.swp", m{"d": m{"a": "206", "b": "207"}}, json.Marshal), + }) + + // Not valid configs, dummy files those appear lexicographically last. + configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{ + configtest.NewDummyFile("999_file_1", dummyFileSize), + configtest.NewDummyFile("999_file_2", dummyFileSize), + configtest.NewDummyFile("999_file_3", dummyFileSize), + }) + + finalConfig := m{"a": "1", "b": m{"a": "2", "b": "3"}, "c": m{"a": "4", "b": "5"}} + + var ( + v *viper.Viper + err error + ) + + t.Run("create config with config dir only", func(t *testing.T) { + v, err = config.CreateViper( + config.WithConfigDir(configDirPath), + ) + require.NoError(t, err) + assert.Equal(t, finalConfig, v.AllSettings()) + }) + + t.Run("reload config with config dir only", func(t *testing.T) { + err = config.ReloadViper( + config.WithViper(v), + config.WithConfigDir(configDirPath), + ) + require.NoError(t, err) + assert.Equal(t, finalConfig, v.AllSettings()) + }) + + t.Run("create config with both config and config dir", func(t *testing.T) { + v, err = config.CreateViper( + config.WithConfigFile(path.Join(configPath, configFile)), + config.WithConfigDir(configDirPath), + ) + require.NoError(t, err) + assert.Equal(t, finalConfig, v.AllSettings()) + }) + + t.Run("reload config with both config and config dir", func(t *testing.T) { + err = config.ReloadViper( + config.WithViper(v), + config.WithConfigFile(path.Join(configPath, configFile)), + config.WithConfigDir(configDirPath), + ) + require.NoError(t, err) + assert.Equal(t, finalConfig, v.AllSettings()) + }) +} diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go index 9e4fa3098..13f447af4 100644 --- a/cmd/internal/common/exit.go +++ b/cmd/internal/common/exit.go @@ -26,13 +26,15 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { _ = iota internal aclDenied + apemanagerDenied ) var ( code int - internalErr = new(sdkstatus.ServerInternal) - accessErr = new(sdkstatus.ObjectAccessDenied) + internalErr = new(sdkstatus.ServerInternal) + accessErr = new(sdkstatus.ObjectAccessDenied) + apemanagerErr = new(sdkstatus.APEManagerAccessDenied) ) switch { @@ -41,13 +43,21 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { case errors.As(err, &accessErr): code = aclDenied err = fmt.Errorf("%w: %s", err, accessErr.Reason()) + case errors.As(err, &apemanagerErr): + code = apemanagerDenied + err = fmt.Errorf("%w: %s", err, apemanagerErr.Reason()) default: code = internal } cmd.PrintErrln(err) - if cmd.PersistentPostRun != nil { - cmd.PersistentPostRun(cmd, nil) + for p := cmd; p != nil; p = p.Parent() { + if p.PersistentPostRun != nil { + p.PersistentPostRun(cmd, nil) + if !cobra.EnableTraverseRunHooks { + break + } + } } os.Exit(code) } diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index 79b03a726..5dd1a060e 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -14,28 +14,28 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, ) { var strState string - switch { + switch node.Status() { default: strState = "STATE_UNSUPPORTED" - case node.IsOnline(): + case netmap.Online: strState = "ONLINE" - case node.IsOffline(): + case netmap.Offline: 
strState = "OFFLINE" - case node.IsMaintenance(): + case netmap.Maintenance: strState = "MAINTENANCE" } cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState) - netmap.IterateNetworkEndpoints(node, func(endpoint string) { + for endpoint := range node.NetworkEndpoints() { cmd.Printf("%s ", endpoint) - }) + } cmd.Println() if !short { - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { cmd.Printf("%s\t%s: %s\n", indent, key, value) - }) + } } } diff --git a/config/example/ir.env b/config/example/ir.env index 3f9530ab6..c13044a6e 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -1,4 +1,7 @@ FROSTFS_IR_LOGGER_LEVEL=info +FROSTFS_IR_LOGGER_TIMESTAMP=true +FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_IR_WALLET_PATH=/path/to/wallet.json FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX @@ -79,3 +82,12 @@ FROSTFS_IR_PPROF_MUTEX_RATE=10000 FROSTFS_IR_PROMETHEUS_ENABLED=true FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090 FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s + +FROSTFS_MULTINET_ENABLED=true +FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24" +FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185" +FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24" +FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" +FROSTFS_MULTINET_BALANCER=roundrobin +FROSTFS_MULTINET_RESTRICT=false +FROSTFS_MULTINET_FALLBACK_DELAY=350ms diff --git a/config/example/ir.yaml b/config/example/ir.yaml index 401328e72..ed53f014b 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -2,6 +2,10 @@ logger: level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" + timestamp: true + tags: + - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`. 
+ level: debug wallet: path: /path/to/wallet.json # Path to NEP-6 NEO wallet file @@ -122,3 +126,18 @@ prometheus: systemdnotify: enabled: true + +multinet: + enabled: true + subnets: + - mask: 192.168.219.174/24 + source_ips: + - 192.168.218.185 + - 192.168.219.185 + - mask: 10.78.70.74/24 + source_ips: + - 10.78.70.185 + - 10.78.71.185 + balancer: roundrobin + restrict: false + fallback_delay: 350ms diff --git a/config/example/node.env b/config/example/node.env index 00190eb39..9a2426358 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -1,5 +1,8 @@ FROSTFS_LOGGER_LEVEL=debug FROSTFS_LOGGER_DESTINATION=journald +FROSTFS_LOGGER_TIMESTAMP=true +FROSTFS_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_PPROF_ENABLED=true FROSTFS_PPROF_ADDRESS=localhost:6060 @@ -19,9 +22,9 @@ FROSTFS_NODE_WALLET_PASSWORD=password FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083" FROSTFS_NODE_ATTRIBUTE_0=Price:11 FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" -FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state +FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db # Tree service section FROSTFS_TREE_ENABLED=true @@ -30,6 +33,7 @@ FROSTFS_TREE_REPLICATION_CHANNEL_CAPACITY=32 FROSTFS_TREE_REPLICATION_WORKER_COUNT=32 FROSTFS_TREE_REPLICATION_TIMEOUT=5s FROSTFS_TREE_SYNC_INTERVAL=1h +FROSTFS_TREE_SYNC_BATCH_SIZE=2000 FROSTFS_TREE_AUTHORIZED_KEYS="0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56" # gRPC section @@ -81,16 +85,21 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s FROSTFS_REPLICATOR_PUT_TIMEOUT=15s FROSTFS_REPLICATOR_POOL_SIZE=10 +# Container service section +FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500 + # Object service section -FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 -FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 +FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" + +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=1000 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 # Storage engine section -FROSTFS_STORAGE_SHARD_POOL_SIZE=15 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 -FROSTFS_STORAGE_REBUILD_WORKERS_COUNT=1000 ## 0 shard ### Flag to refill Metabase from BlobStor FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE=false @@ -105,13 +114,17 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384 FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728 FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30 FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472 +FROSTFS_STORAGE_SHARD_0_WRITECACHE_PAGE_SIZE=4096 +FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49 +FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_FLUSHING_OBJECTS_SIZE=100 ### Metabase config FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms ### Blobstor config -FROSTFS_STORAGE_SHARD_0_COMPRESS=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" 
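Editorial note on the shard settings above: the flat `compress` flag is replaced by a nested `compression` section with an explicit `level` and optional compressibility estimation, as the example configs in this patch show. A minimal Go sketch of the shape those keys imply; the struct name and mapstructure tags are illustrative assumptions, not the repository's actual types:

```go
package shardconfig

// CompressionConfig mirrors the new nested `compression` subsection as it
// appears in the example configs of this patch. Struct name and tags are
// assumptions made for illustration only.
type CompressionConfig struct {
	Enabled                          bool     `mapstructure:"enabled"`
	Level                            string   `mapstructure:"level"` // e.g. "fastest"
	ExcludeContentTypes              []string `mapstructure:"exclude_content_types"`
	EstimateCompressibility          bool     `mapstructure:"estimate_compressibility"`
	EstimateCompressibilityThreshold float64  `mapstructure:"estimate_compressibility_threshold"`
}
```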
FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7 @@ -146,6 +159,54 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500 #### Limit of concurrent workers collecting expired objects by the garbage collector FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15 +#### Limits config +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100 ## 1 shard ### Flag to refill Metabase from BlobStor @@ -197,8 +258,26 @@ FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m FROSTFS_TRACING_ENABLED=true FROSTFS_TRACING_ENDPOINT="localhost" FROSTFS_TRACING_EXPORTER="otlp_grpc" +FROSTFS_TRACING_TRUSTED_CA="" +FROSTFS_TRACING_ATTRIBUTES_0_KEY=key0 +FROSTFS_TRACING_ATTRIBUTES_0_VALUE=value +FROSTFS_TRACING_ATTRIBUTES_1_KEY=key1 
+FROSTFS_TRACING_ATTRIBUTES_1_VALUE=value FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824 # AUDIT section FROSTFS_AUDIT_ENABLED=true + +# MULTINET section +FROSTFS_MULTINET_ENABLED=true +FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24" +FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185" +FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24" +FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" +FROSTFS_MULTINET_BALANCER=roundrobin +FROSTFS_MULTINET_RESTRICT=false +FROSTFS_MULTINET_FALLBACK_DELAY=350ms + +FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" +FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" diff --git a/config/example/node.json b/config/example/node.json index 9051d2bb7..6b7a9c2c6 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -1,7 +1,14 @@ { "logger": { "level": "debug", - "destination": "journald" + "destination": "journald", + "timestamp": true, + "tags": [ + { + "names": "main, morph", + "level": "debug" + } + ] }, "pprof": { "enabled": true, @@ -30,13 +37,13 @@ ], "attribute_0": "Price:11", "attribute_1": "UN-LOCODE:RU MSK", - "relay": true, "persistent_sessions": { "path": "/sessions" }, "persistent_state": { "path": "/state" - } + }, + "locode_db_path": "/path/to/locode/db" }, "grpc": { "0": { @@ -68,6 +75,7 @@ "replication_worker_count": 32, "replication_timeout": "5s", "sync_interval": "1h", + "sync_batch_size": 2000, "authorized_keys": [ "0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0", "02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56" @@ -122,20 +130,41 @@ "pool_size": 10, "put_timeout": "15s" }, + "container": { + "list_stream": { + "batch_size": "500" + } + }, "object": { "delete": { "tombstone_lifetime": 10 }, "put": { - "remote_pool_size": 100, - "local_pool_size": 200, "skip_session_token_issuer_verification": true + }, + "get": { + "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"] } }, + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 1000 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + }, "storage": { - "shard_pool_size": 15, "shard_ro_error_threshold": 100, - "rebuild_workers_count": 1000, "shard": { "0": { "mode": "read-only", @@ -148,7 +177,10 @@ "small_object_size": 16384, "max_object_size": 134217728, "flush_worker_count": 30, - "capacity": 3221225472 + "capacity": 3221225472, + "page_size": 4096, + "max_object_count": 49, + "max_flushing_objects_size": 100 }, "metabase": { "path": "tmp/0/meta", @@ -156,12 +188,15 @@ "max_batch_size": 100, "max_batch_delay": "10ms" }, - "compress": true, - "compression_exclude_content_types": [ - "audio/*", "video/*" - ], - "compression_estimate_compressibility": true, - "compression_estimate_compressibility_threshold": 0.7, + "compression": { + "enabled": true, + "level": "fastest", + "exclude_content_types": [ + "audio/*", "video/*" + ], + "estimate_compressibility": true, + "estimate_compressibility_threshold": 0.7 + }, "small_object_size": 102400, "blobstor": [ { @@ -194,6 +229,87 @@ "remover_sleep_interval": "2m", "expired_collector_batch_size": 1500, "expired_collector_worker_count": 15 + }, + "limits": { + "read": { + 
"max_running_ops": 10000, + "max_waiting_ops": 1000, + "idle_timeout": "30s", + "tags": [ + { + "tag": "internal", + "weight": 20, + "limit_ops": 0, + "reserved_ops": 1000 + }, + { + "tag": "client", + "weight": 70, + "reserved_ops": 10000 + }, + { + "tag": "background", + "weight": 5, + "limit_ops": 10000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 5, + "limit_ops": 25000 + }, + { + "tag": "policer", + "weight": 5, + "limit_ops": 25000, + "prohibited": true + }, + { + "tag": "treesync", + "weight": 5, + "limit_ops": 25 + } + ] + }, + "write": { + "max_running_ops": 1000, + "max_waiting_ops": 100, + "idle_timeout": "45s", + "tags": [ + { + "tag": "internal", + "weight": 200, + "limit_ops": 0, + "reserved_ops": 100 + }, + { + "tag": "client", + "weight": 700, + "reserved_ops": 1000 + }, + { + "tag": "background", + "weight": 50, + "limit_ops": 1000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 50, + "limit_ops": 2500 + }, + { + "tag": "policer", + "weight": 50, + "limit_ops": 2500 + }, + { + "tag": "treesync", + "weight": 50, + "limit_ops": 100 + } + ] + } } }, "1": { @@ -214,7 +330,9 @@ "max_batch_size": 200, "max_batch_delay": "20ms" }, - "compress": false, + "compression": { + "enabled": false + }, "small_object_size": 102400, "blobstor": [ { @@ -252,13 +370,60 @@ }, "tracing": { "enabled": true, - "endpoint": "localhost:9090", - "exporter": "otlp_grpc" + "endpoint": "localhost", + "exporter": "otlp_grpc", + "trusted_ca": "", + "attributes":[ + { + "key": "key0", + "value": "value" + }, + { + "key": "key1", + "value": "value" + } + ] }, "runtime": { "soft_memory_limit": 1073741824 }, "audit": { "enabled": true + }, + "multinet": { + "enabled": true, + "subnets": [ + { + "mask": "192.168.219.174/24", + "source_ips": [ + "192.168.218.185", + "192.168.219.185" + ] + }, + { + "mask": "10.78.70.74/24", + "source_ips":[ + "10.78.70.185", + "10.78.71.185" + ] + } + ], + "balancer": "roundrobin", + "restrict": false, + "fallback_delay": "350ms" + }, + "qos": { + "critical": { + "authorized_keys": [ + "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11", + "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" + ] + }, + "internal": { + "authorized_keys": [ + "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2", + "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" + ] + } } } diff --git a/config/example/node.yaml b/config/example/node.yaml index bcc8552b3..2d4bc90fb 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -1,6 +1,10 @@ logger: level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" destination: journald # logger destination: one of "stdout" (default), "journald" + timestamp: true + tags: + - names: "main, morph" + level: debug systemdnotify: enabled: true @@ -30,11 +34,11 @@ node: - grpcs://localhost:8083 attribute_0: "Price:11" attribute_1: UN-LOCODE:RU MSK - relay: true # start Storage node in relay mode without bootstrapping into the Network map persistent_sessions: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) persistent_state: path: /state # path to persistent state file of Storage node + "locode_db_path": "/path/to/locode/db" grpc: - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server @@ -58,6 +62,7 @@ tree: replication_channel_capacity: 32 replication_timeout: 5s sync_interval: 1h + sync_batch_size: 2000 authorized_keys: # list of 
hex-encoded public keys that have rights to use the Tree Service with frostfs-cli - 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 - 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56 @@ -77,9 +82,11 @@ contracts: # side chain NEOFS contract script hashes; optional, override values morph: dial_timeout: 30s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching. + cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). + # Negative value disables caching. A zero value sets the default value. # Default value: block time. It is recommended to have this value less or equal to block time. # Cached entities: containers, container lists, eACL tables. + container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache. switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success - address: wss://rpc1.morph.frostfs.info:40341/ws @@ -91,6 +98,9 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 ape_chain_cache_size: 100000 + netmap: + candidates: + poll_interval: 20s apiclient: dial_timeout: 15s # timeout for FrostFS API client connection @@ -105,19 +115,32 @@ replicator: put_timeout: 15s # timeout for the Replicator PUT remote operation pool_size: 10 # maximum amount of concurrent replications +container: + list_stream: + batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once + object: delete: tombstone_lifetime: 10 # tombstone "local" lifetime in epochs put: - remote_pool_size: 100 # number of async workers for remote PUT operations - local_pool_size: 200 # number of async workers for local PUT operations skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true + get: + priority: # list of metrics of nodes for prioritization + - $attribute:ClusterName + - $attribute:UN-LOCODE + +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 storage: - # note: shard configuration can be omitted for relay node (see `node.relay`) - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) - rebuild_workers_count: 1000 # count of rebuild storage concurrent workers shard: default: # section with the default shard parameters @@ -130,7 +153,7 @@ storage: flush_worker_count: 30 # number of write-cache flusher threads metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) + perm: 0o644 # permissions for metabase files(directories: +x for current user and group) max_batch_size: 200 max_batch_delay: 20ms @@ -138,18 +161,19 @@ storage: max_batch_delay: 5ms # maximum delay for a batch of operations to be executed max_batch_size: 100 # maximum amount of operations in a single batch - compress: false # turn on/off zstd(level 3) compression of stored objects + compression: + enabled: false # turn on/off zstd compression of stored objects small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB,
not in FS, bytes blobstor: - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 1 # max depth of object tree storage in key-value DB width: 4 # max width of object tree storage in key-value DB opened_cache_capacity: 50 # maximum number of opened database files opened_cache_ttl: 5m # ttl for opened database file opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 5 # max depth of object tree storage in FS gc: @@ -171,18 +195,23 @@ storage: no_sync: true path: tmp/0/cache # write-cache root directory capacity: 3221225472 # approximate write-cache total size, bytes + max_object_count: 49 + page_size: 4k + max_flushing_objects_size: 100b metabase: path: tmp/0/meta # metabase path max_batch_size: 100 max_batch_delay: 10ms - compress: true # turn on/off zstd(level 3) compression of stored objects - compression_exclude_content_types: - - audio/* - - video/* - compression_estimate_compressibility: true - compression_estimate_compressibility_threshold: 0.7 + compression: + enabled: true # turn on/off zstd compression of stored objects + level: fastest + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 blobstor: - type: blobovnicza @@ -205,6 +234,59 @@ storage: expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector + limits: + read: + max_running_ops: 10000 + max_waiting_ops: 1000 + idle_timeout: 30s + tags: + - tag: internal + weight: 20 + limit_ops: 0 + reserved_ops: 1000 + - tag: client + weight: 70 + reserved_ops: 10000 + - tag: background + weight: 5 + limit_ops: 10000 + reserved_ops: 0 + - tag: writecache + weight: 5 + limit_ops: 25000 + - tag: policer + weight: 5 + limit_ops: 25000 + prohibited: true + - tag: treesync + weight: 5 + limit_ops: 25 + write: + max_running_ops: 1000 + max_waiting_ops: 100 + idle_timeout: 45s + tags: + - tag: internal + weight: 200 + limit_ops: 0 + reserved_ops: 100 + - tag: client + weight: 700 + reserved_ops: 1000 + - tag: background + weight: 50 + limit_ops: 1000 + reserved_ops: 0 + - tag: writecache + weight: 50 + limit_ops: 2500 + - tag: policer + weight: 50 + limit_ops: 2500 + - tag: treesync + weight: 50 + limit_ops: 100 + 1: writecache: path: tmp/1/cache # write-cache root directory @@ -223,15 +305,46 @@ storage: pilorama: path: tmp/1/blob/pilorama.db no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. 
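Stepping back to the `limits` blocks introduced above: they tie shard IO scheduling to the QoS tags configured in the `qos` section of this patch. Reading the examples, `weight` sets a tag's relative share, `limit_ops` caps throughput (a value of 0 appears alongside a nonzero `reserved_ops`, suggesting 0 means uncapped), `reserved_ops` reserves capacity for the tag, and `prohibited` rejects the tag outright. A hedged Go sketch of a matching config struct and a sanity check; all names here are assumptions, not the repository's actual types:

```go
package qosconfig

import "fmt"

// IOTagConfig mirrors one entry of `limits.read.tags` / `limits.write.tags`
// from config/example/node.yaml. Field semantics are inferred from the
// examples in this patch, not confirmed against the implementation.
type IOTagConfig struct {
	Tag         string  `mapstructure:"tag"`
	Weight      float64 `mapstructure:"weight"`       // relative share of scheduler capacity
	LimitOps    float64 `mapstructure:"limit_ops"`    // hard cap; 0 appears to mean "no cap"
	ReservedOps float64 `mapstructure:"reserved_ops"` // capacity kept available for the tag
	Prohibited  bool    `mapstructure:"prohibited"`   // reject requests with this tag outright
}

// validateTags is an illustrative sanity check: weights must be positive and
// tags unique within a read or write section.
func validateTags(tags []IOTagConfig) error {
	seen := make(map[string]struct{}, len(tags))
	for _, tc := range tags {
		if tc.Weight <= 0 {
			return fmt.Errorf("tag %q: weight must be positive", tc.Tag)
		}
		if _, ok := seen[tc.Tag]; ok {
			return fmt.Errorf("tag %q: duplicated", tc.Tag)
		}
		seen[tc.Tag] = struct{}{}
	}
	return nil
}
```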
- perm: 0644 # permission to use for the database file and intermediate directories + perm: 0o644 # permission to use for the database file and intermediate directories tracing: enabled: true exporter: "otlp_grpc" endpoint: "localhost" + trusted_ca: "" + attributes: + - key: key0 + value: value + - key: key1 + value: value runtime: soft_memory_limit: 1gb audit: enabled: true + +multinet: + enabled: true + subnets: + - mask: 192.168.219.174/24 + source_ips: + - 192.168.218.185 + - 192.168.219.185 + - mask: 10.78.70.74/24 + source_ips: + - 10.78.70.185 + - 10.78.71.185 + balancer: roundrobin + restrict: false + fallback_delay: 350ms + +qos: + critical: + authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag + - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 + - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a diff --git a/config/mainnet/README.md b/config/mainnet/README.md deleted file mode 100644 index 717a9b0ff..000000000 --- a/config/mainnet/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# N3 Mainnet Storage node configuration - -Here is a template for simple storage node configuration in N3 Mainnet. -Make sure to specify correct values instead of `<...>` placeholders. -Do not change `contracts` section. Run the latest frostfs-node release with -the fixed config `frostfs-node -c config.yml` - -To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract. -The contract sript hash is `2cafa46838e8b564468ebd868dcafdd99dce6221` -(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`) - -## Tips - -Use `grpcs://` scheme in the announced address if you enable TLS in grpc server. 
-```yaml -node: - addresses: - - grpcs://frostfs.my.org:8080 - -grpc: - num: 1 - 0: - endpoint: frostfs.my.org:8080 - tls: - enabled: true - certificate: /path/to/cert - key: /path/to/key -``` diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml deleted file mode 100644 index d86ea451f..000000000 --- a/config/mainnet/config.yml +++ /dev/null @@ -1,70 +0,0 @@ -node: - wallet: - path: - address: - password: - addresses: - - - attribute_0: UN-LOCODE: - attribute_1: Price:100000 - attribute_2: User-Agent:FrostFS\/0.9999 - -grpc: - num: 1 - 0: - endpoint: - tls: - enabled: false - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/path/metabase - perm: 0600 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m - -logger: - level: info - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -object: - put: - remote_pool_size: 100 - local_pool_size: 100 - -morph: - rpc_endpoint: - - wss://rpc1.morph.frostfs.info:40341/ws - - wss://rpc2.morph.frostfs.info:40341/ws - - wss://rpc3.morph.frostfs.info:40341/ws - - wss://rpc4.morph.frostfs.info:40341/ws - - wss://rpc5.morph.frostfs.info:40341/ws - - wss://rpc6.morph.frostfs.info:40341/ws - - wss://rpc7.morph.frostfs.info:40341/ws - dial_timeout: 20s - -contracts: - balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55 - container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5 - netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1 diff --git a/config/testnet/README.md b/config/testnet/README.md deleted file mode 100644 index b5faf2b27..000000000 --- a/config/testnet/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# N3 Testnet Storage node configuration - -There is a prepared configuration for NeoFS Storage Node deployment in -N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared -docker image and run it with docker-compose. - -## Build image - -Prepared **frostfs-storage-testnet** image is available at Docker Hub. -However, if you need to rebuild it for some reason, run -`make image-storage-testnet` command. - -``` -$ make image-storage-testnet -... -Successfully built ab0557117b02 -Successfully tagged nspccdev/neofs-storage-testnet:0.25.1 -``` - -## Deploy node - -To run a storage node in N3 Testnet environment, you should deposit GAS assets, -update docker-compose file and start the node. - -### Deposit - -The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a -bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx. - -First, obtain GAS in N3 Testnet chain. You can do that with -[faucet](https://neowish.ngd.network) service. - -Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet. -You can provide scripthash in the `data` argument of transfer tx to make a -deposit to a specified account. Otherwise, deposit is made to the tx sender. - -NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`, -so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`. - -See a deposit example with `neo-go`. 
- -``` -neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \ ---from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \ ---to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \ ---token GAS \ ---amount 1 -``` - -### Configure - -Next, configure `node_config.env` file. Change endpoints values. Both -should contain your **public** IP. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -``` - -Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory) -attribute. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED -``` - -You can validate UN/LOCODE attribute in -[NeoFS LOCODE database](https://github.com/TrueCloudLab/frostfs-locode-db/releases/tag/v0.1.0) -with frostfs-cli. - -``` -$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED' -Country: Russia -Location: Saint Petersburg (ex Leningrad) -Continent: Europe -Subdivision: [SPE] Sankt-Peterburg -Coordinates: 59.53, 30.15 -``` - -It is recommended to pass the node's key as a file. To do so, convert your wallet -WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file. - -``` -// Print WIF in a 32-byte hex format -$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56 -PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059 -WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ -ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc -ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf - -// Save 32-byte hex into a file -$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key -``` - -Then, specify the path to this file in `docker-compose.yml` -```yaml - volumes: - - frostfs_storage:/storage - - ./my_wallet.key:/node.key -``` - - -NeoFS objects will be stored on your machine. By default, docker-compose -is configured to store objects in named docker volume `frostfs_storage`. You can -specify a directory on the filesystem to store objects there. - -```yaml - volumes: - - /home/username/frostfs/rc3/storage:/storage - - ./my_wallet.key:/node.key -``` - -### Start - -Run the node with `docker-compose up` command and stop it with `docker-compose down`. - -### Debug - -To print node logs, use `docker logs frostfs-testnet`. 
To print debug messages in -log, set up log level to debug with this env: - -```yaml - environment: - - NEOFS_LOGGER_LEVEL=debug -``` diff --git a/config/testnet/config.yml b/config/testnet/config.yml deleted file mode 100644 index 76b36cdf6..000000000 --- a/config/testnet/config.yml +++ /dev/null @@ -1,52 +0,0 @@ -logger: - level: info - -morph: - rpc_endpoint: - - wss://rpc01.morph.testnet.frostfs.info:51331/ws - - wss://rpc02.morph.testnet.frostfs.info:51331/ws - - wss://rpc03.morph.testnet.frostfs.info:51331/ws - - wss://rpc04.morph.testnet.frostfs.info:51331/ws - - wss://rpc05.morph.testnet.frostfs.info:51331/ws - - wss://rpc06.morph.testnet.frostfs.info:51331/ws - - wss://rpc07.morph.testnet.frostfs.info:51331/ws - dial_timeout: 20s - -contracts: - balance: e0420c216003747626670d1424569c17c79015bf - container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0 - netmap: d4b331639799e2958d4bc5b711b469d79de94e01 - -node: - key: /node.key - attribute_0: Deployed:SelfHosted - attribute_1: User-Agent:FrostFS\/0.9999 - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/metabase - perm: 0777 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index 47328c419..000000000 --- a/debian/changelog +++ /dev/null @@ -1,5 +0,0 @@ -frostfs-node (0.0.1) stable; urgency=medium - - * Initial package build - - -- TrueCloudLab Tue, 25 Oct 2022 21:10:49 +0300 diff --git a/debian/clean b/debian/clean deleted file mode 100644 index 44dc05e0a..000000000 --- a/debian/clean +++ /dev/null @@ -1,2 +0,0 @@ -man/ -debian/*.bash-completion diff --git a/debian/control b/debian/control deleted file mode 100644 index f3f214bca..000000000 --- a/debian/control +++ /dev/null @@ -1,39 +0,0 @@ -Source: frostfs-node -Section: misc -Priority: optional -Maintainer: TrueCloudLab -Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts -Standards-Version: 4.5.1 -Homepage: https://fs.neo.org/ -Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-node.git -Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-node - -Package: frostfs-storage -Architecture: any -Depends: ${misc:Depends} -Description: FrostFS Storage node - FrostFS is a decentralized distributed object storage integrated with the NEO - Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care - of storing and distributing user's data. Any Neo user may participate in the - network and get paid for providing storage resources to other users or store - their data in FrostFS and pay a competitive price for it. - -Package: frostfs-ir -Architecture: any -Depends: ${misc:Depends}, frostfs-locode-db -Description: FrostFS InnerRing node - FrostFS is a decentralized distributed object storage integrated with the NEO - Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care - of storing and distributing user's data. Any Neo user may participate in the - network and get paid for providing storage resources to other users or store - their data in FrostFS and pay a competitive price for it. 
- -Package: frostfs-cli -Architecture: any -Depends: ${misc:Depends} -Description: CLI tools for FrostFS - FrostFS is a decentralized distributed object storage integrated with the NEO - Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care - of storing and distributing user's data. Any Neo user may participate in the - network and get paid for providing storage resources to other users or store - their data in FrostFS and pay a competitive price for it. diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index 61dab665d..000000000 --- a/debian/copyright +++ /dev/null @@ -1,23 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: frostfs-node -Upstream-Contact: tech@frostfs.info -Source: https://git.frostfs.info/TrueCloudLab/frostfs-node - -Files: * -Copyright: 2022-2023 TrueCloudLab (@TrueCloudLab), contributors of FrostFS project - 2018-2022 NeoSPCC (@nspcc-dev), contributors of NeoFS project - (https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/CREDITS.md) - -License: GPL-3 - This program is free software: you can redistribute it and/or modify it - under the terms of the GNU General Public License as published - by the Free Software Foundation; version 3. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program or at /usr/share/common-licenses/GPL-3 - If not, see . diff --git a/debian/frostfs-cli.docs b/debian/frostfs-cli.docs deleted file mode 100644 index 58d4559cc..000000000 --- a/debian/frostfs-cli.docs +++ /dev/null @@ -1,4 +0,0 @@ -CONTRIBUTING.md -CREDITS.md -README.md -cmd/frostfs-adm/docs diff --git a/debian/frostfs-cli.install b/debian/frostfs-cli.install deleted file mode 100644 index 93025187b..000000000 --- a/debian/frostfs-cli.install +++ /dev/null @@ -1,3 +0,0 @@ -bin/frostfs-adm usr/bin -bin/frostfs-cli usr/bin -bin/frostfs-lens usr/bin diff --git a/debian/frostfs-cli.manpages b/debian/frostfs-cli.manpages deleted file mode 100644 index 85c5e001d..000000000 --- a/debian/frostfs-cli.manpages +++ /dev/null @@ -1 +0,0 @@ -man/* diff --git a/debian/frostfs-ir.dirs b/debian/frostfs-ir.dirs deleted file mode 100644 index 90da8fd27..000000000 --- a/debian/frostfs-ir.dirs +++ /dev/null @@ -1,2 +0,0 @@ -/etc/frostfs/ir -/var/lib/frostfs/ir diff --git a/debian/frostfs-ir.docs b/debian/frostfs-ir.docs deleted file mode 100644 index 38b0cef26..000000000 --- a/debian/frostfs-ir.docs +++ /dev/null @@ -1,3 +0,0 @@ -CONTRIBUTING.md -CREDITS.md -README.md diff --git a/debian/frostfs-ir.install b/debian/frostfs-ir.install deleted file mode 100644 index e052f5434..000000000 --- a/debian/frostfs-ir.install +++ /dev/null @@ -1 +0,0 @@ -bin/frostfs-ir usr/bin diff --git a/debian/frostfs-ir.postinst b/debian/frostfs-ir.postinst deleted file mode 100755 index eb9d381c9..000000000 --- a/debian/frostfs-ir.postinst +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - -case "$1" in - configure) - 
USERNAME=ir - id -u frostfs-ir >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/ir --system -M -U -c "FrostFS InnerRing node" frostfs-ir - if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME - chmod -f 0750 /etc/frostfs/$USERNAME - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml - chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true - chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true - fi - USERDIR="$(getent passwd frostfs-$USERNAME | cut -d: -f6)" - if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then - chown -f frostfs-$USERNAME: "$USERDIR" - fi - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.postrm b/debian/frostfs-ir.postrm deleted file mode 100755 index cbb7db2f2..000000000 --- a/debian/frostfs-ir.postrm +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - purge) - rm -rf /var/lib/frostfs/ir/* - ;; - - remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.preinst b/debian/frostfs-ir.preinst deleted file mode 100755 index 37f952537..000000000 --- a/debian/frostfs-ir.preinst +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.prerm b/debian/frostfs-ir.prerm deleted file mode 100755 index 0da369d75..000000000 --- a/debian/frostfs-ir.prerm +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.service b/debian/frostfs-ir.service deleted file mode 100644 index 304017f68..000000000 --- a/debian/frostfs-ir.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=FrostFS InnerRing node -Requires=network.target - -[Service] -Type=notify -NotifyAccess=all -ExecStart=/usr/bin/frostfs-ir --config /etc/frostfs/ir/config.yml -User=frostfs-ir -Group=frostfs-ir -WorkingDirectory=/var/lib/frostfs/ir -Restart=always -RestartSec=5 -PrivateTmp=true - -[Install] -WantedBy=multi-user.target diff --git a/debian/frostfs-storage.dirs b/debian/frostfs-storage.dirs deleted file mode 100644 index 4142145ee..000000000 --- a/debian/frostfs-storage.dirs +++ /dev/null @@ -1,3 +0,0 @@ -/etc/frostfs/storage -/srv/frostfs -/var/lib/frostfs/storage diff --git a/debian/frostfs-storage.docs b/debian/frostfs-storage.docs deleted file mode 100644 index cd1f5f23f..000000000 --- a/debian/frostfs-storage.docs +++ /dev/null @@ -1,4 +0,0 @@ -docs/storage-node-configuration.md -CONTRIBUTING.md -CREDITS.md -README.md diff --git a/debian/frostfs-storage.install b/debian/frostfs-storage.install deleted file mode 100644 index 670935e7b..000000000 --- a/debian/frostfs-storage.install +++ /dev/null @@ -1 +0,0 @@ -bin/frostfs-node usr/bin diff --git a/debian/frostfs-storage.postinst b/debian/frostfs-storage.postinst deleted file mode 100755 index 88fa53be5..000000000 --- a/debian/frostfs-storage.postinst +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - -case "$1" in - configure) - USERNAME=storage - id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -M -U -c "FrostFS Storage node" frostfs-$USERNAME - if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME - chmod -f 0750 /etc/frostfs/$USERNAME - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml - chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true - chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true - fi - USERDIR=$(getent passwd frostfs-$USERNAME | cut -d: -f6) - if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then - chown -f frostfs-$USERNAME: "$USERDIR" - fi - USERDIR=/srv/frostfs - if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then - chown -f frostfs-$USERNAME: $USERDIR - fi - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.postrm b/debian/frostfs-storage.postrm deleted file mode 100755 index d9c8c9656..000000000 --- a/debian/frostfs-storage.postrm +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - purge) - rm -rf /var/lib/frostfs/storage/* - ;; - - remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.preinst b/debian/frostfs-storage.preinst deleted file mode 100755 index 37f952537..000000000 --- a/debian/frostfs-storage.preinst +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.prerm b/debian/frostfs-storage.prerm deleted file mode 100755 index 0da369d75..000000000 --- a/debian/frostfs-storage.prerm +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.service b/debian/frostfs-storage.service deleted file mode 100644 index 573961756..000000000 --- a/debian/frostfs-storage.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=FrostFS Storage node -Requires=network.target - -[Service] -Type=notify -NotifyAccess=all -ExecStart=/usr/bin/frostfs-node --config /etc/frostfs/storage/config.yml -User=frostfs-storage -Group=frostfs-storage -WorkingDirectory=/srv/frostfs -Restart=always -RestartSec=5 -PrivateTmp=true - -[Install] -WantedBy=multi-user.target diff --git a/debian/rules b/debian/rules deleted file mode 100755 index 0dd8ee399..000000000 --- a/debian/rules +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/make -f - -# Do not try to strip Go binaries -export DEB_BUILD_OPTIONS := nostrip - -%: - dh $@ --with bash-completion - -override_dh_auto_test: - -override_dh_auto_install: - echo $(DEB_BUILD_OPTIONS) - dh_auto_install - - bin/frostfs-adm gendoc --type man man/ - bin/frostfs-cli gendoc --type man man/ - - bin/frostfs-adm completion bash > debian/frostfs-adm.bash-completion - bin/frostfs-cli completion bash > debian/frostfs-cli.bash-completion - install -m 0755 -d debian/frostfs-cli/usr/share/fish/completions/ - install -m 0755 -d debian/frostfs-cli/usr/share/zsh/vendor-completions/ - bin/frostfs-adm completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-adm.fish - bin/frostfs-adm completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-adm - bin/frostfs-cli completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-cli.fish - bin/frostfs-cli completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-cli - - install -T -m 0640 config/example/ir.yaml debian/frostfs-ir/etc/frostfs/ir/config.yml - install -T -m 0640 config/example/ir-control.yaml debian/frostfs-ir/etc/frostfs/ir/control.yml - install -T -m 0640 config/example/node.yaml debian/frostfs-storage/etc/frostfs/storage/config.yml - install -T -m 0640 config/example/node-control.yaml debian/frostfs-storage/etc/frostfs/storage/control.yml - -override_dh_installsystemd: - dh_installsystemd --no-enable --no-start --name=frostfs-ir - dh_installsystemd --no-enable --no-start --name=frostfs-storage - -override_dh_installchangelogs: - dh_installchangelogs -k CHANGELOG.md - -override_dh_installdocs: - dh_installdocs diff --git a/debian/source/format b/debian/source/format deleted file mode 100644 index 163aaf8d8..000000000 --- a/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json index 990fd42a8..b68ce4fa3 100644 --- a/dev/.vscode-example/launch.json +++ b/dev/.vscode-example/launch.json @@ -42,7 +42,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080", @@ -78,7 +77,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9090", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + 
"FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8080" }, "postDebugTask": "env-down" }, @@ -93,7 +97,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082", @@ -129,7 +132,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9091", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8082" }, "postDebugTask": "env-down" }, @@ -144,7 +152,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084", @@ -180,7 +187,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9092", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8084" }, "postDebugTask": "env-down" }, @@ -195,7 +207,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086", @@ -231,7 +242,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9093", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8086" }, "postDebugTask": "env-down" } diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml index 9d026797c..40ed35aeb 100644 --- a/dev/docker-compose.yml +++ b/dev/docker-compose.yml @@ -3,7 +3,7 @@ version: "2.4" services: neo-go: - image: nspccdev/neo-go:0.105.0 + image: nspccdev/neo-go:0.106.0 container_name: neo-go command: ["node", "--config-path", "/config", "--privnet", "--debug"] stop_signal: SIGKILL @@ -14,3 +14,15 @@ services: - ./neo-go/node-wallet.json:/wallets/node-wallet.json - ./neo-go/config.yml:/wallets/config.yml - ./neo-go/wallet.json:/wallets/wallet.json + jaeger: + image: jaegertracing/all-in-one:latest + container_name: jaeger + ports: + - '4317:4317' #OTLP over gRPC + - 
'4318:4318' #OTLP over HTTP - '16686:16686' #frontend stop_signal: SIGKILL environment: - COLLECTOR_OTLP_ENABLED=true - SPAN_STORAGE_TYPE=badger - BADGER_EPHEMERAL=true diff --git a/docs/building-deb-package.md b/docs/building-deb-package.md deleted file mode 100644 index 26a77a27f..000000000 --- a/docs/building-deb-package.md +++ /dev/null @@ -1,46 +0,0 @@ -# Building Debian package on host - -## Prerequisites - -For now, we're assuming building for Debian 11 (stable) x86_64. - -Go version 18.4 or later should already be installed, i.e. this runs -successfully: - -* `make all` - -## Installing packaging dependencies - -```shell -$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts -``` - -Warining: number of package installed is pretty large considering dependecies. - -## Package building - -```shell -$ make debpackage -``` - -## Leftovers cleaning - -```shell -$ make debclean -``` -or -```shell -$ dh clean -``` - -# Package versioning - -By default, package version is based on product version and may also contain git -tags and hashes. - -Package version could be overwritten by setting `PKG_VERSION` variable before -build, Debian package versioning rules should be respected. - -```shell -$ PKG_VERSION=0.32.0 make debpackge -``` diff --git a/docs/evacuation.md b/docs/evacuation.md index 9db514a9e..d47d56d15 100644 --- a/docs/evacuation.md +++ b/docs/evacuation.md @@ -10,11 +10,22 @@ First of all, by the evacuation the data is transferred to other shards of the s Only one running evacuation process is allowed on the node at a time. +It is not necessary to turn on maintenance mode on the storage node. + +Once evacuation from a shard has started, it is impossible to read data from it via the public API, except when the evacuation is stopped manually or the node is restarted. + +This restriction is necessary to prevent the policer from removing objects with policy `REP 1 ...` from the remote node during evacuation. + `frostfs-cli` utility is used to manage evacuation. ## Commands -`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag. By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`). +`frostfs-cli control shards evacuation start` starts the evacuation process for the specified shards. To start evacuating all node shards, use the `--all` flag. +By default, objects and trees are evacuated. To limit the evacuation scope, use the `--scope` flag (possible values are `all`, `trees`, `objects`). +To evacuate objects only from containers with policy `REP 1`, use the `--rep-one-only` option. +To adjust the resource consumption required for evacuation, use the following options (a combined invocation is sketched below): + - `--container-worker-count` count of concurrent container evacuation workers + - `--object-worker-count` count of concurrent object evacuation workers `frostfs-cli control shards evacuation stop` stops running evacuation process.
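For illustration, a possible combined invocation is sketched below. It uses only the flags documented above; the `--endpoint` and `--wallet` values are placeholders for the node's control endpoint and wallet, not part of this change:

```shell
# Hypothetical example: evacuate only objects from all shards,
# restricted to containers with policy `REP 1`, with tuned worker counts.
# The endpoint address and wallet path below are placeholders.
frostfs-cli control shards evacuation start --all \
  --scope objects \
  --rep-one-only \
  --container-worker-count 10 \
  --object-worker-count 30 \
  --endpoint localhost:8091 \
  --wallet /path/to/wallet.json
```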
diff --git a/docs/release-instruction.md b/docs/release-instruction.md index ec7b8cdf3..aa867e83c 100644 --- a/docs/release-instruction.md +++ b/docs/release-instruction.md @@ -9,7 +9,7 @@ These should run successfully: * `make lint` (should not change any files); * `make fmts` (should not change any files); * `go mod tidy` (should not change any files); -* integration tests in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv). +* integration tests in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env). ## Make release commit @@ -43,11 +43,6 @@ Write new revision number into the root `VERSION` file: ``` $ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION ``` -Update version in Debian package changelog file -``` -$ cat debian/changelog -``` - Update the supported version of `TrueCloudLab/frostfs-contract` module in root `README.md` if needed. @@ -60,7 +55,7 @@ Add an entry to the `CHANGELOG.md` following the style established there. * update `Unreleased...new` and `new...old` diff-links at the bottom of the file * add optional codename and release date in the heading * remove all empty sections such as `Added`, `Removed`, etc. -* make sure all changes have references to GitHub issues in `#123` format (if possible) +* make sure all changes have references to relevant issues in `#123` format (if possible) * clean up all `Unreleased` sections and leave them empty ### Make release commit @@ -100,35 +95,31 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} ## Post-release -### Prepare and push images to a Docker Hub (if not automated) +### Prepare and push images to a Docker registry (automated) -Create Docker images for all applications and push them into Docker Hub -(requires [organization](https://hub.docker.com/u/truecloudlab) privileges) +Create Docker images for all applications and push them into the container registry +(executed automatically in Forgejo Actions upon pushing a release tag): ```shell $ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} $ make images -$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION} +$ make push-images ``` -### Make a proper GitHub release (if not automated) +### Make a proper release (if not automated) -Edit an automatically-created release on GitHub, copy things from `CHANGELOG.md`. +Edit an automatically-created release on git.frostfs.info, copy things from `CHANGELOG.md`. Build and tar release binaries with `make prepare-release`, attach them to the release. Publish the release. ### Update FrostFS Developer Environment -Prepare pull-request in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv) +Prepare a pull request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env) with new versions. -### Close GitHub milestone +### Close milestone -Look up GitHub [milestones](https://github.com/TrueCloudLab/frostfs-node/milestones) and close the release one if exists. +Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if it exists. ### Rebuild FrostFS LOCODE database diff --git a/docs/shard-modes.md b/docs/shard-modes.md index 3b459335b..6cc4ab13c 100644 --- a/docs/shard-modes.md +++ b/docs/shard-modes.md @@ -51,10 +51,7 @@ However, all mode changing operations are idempotent. ## Automatic mode changes -Shard can automatically switch to a `degraded-read-only` mode in 3 cases: -1. If the metabase was not available or couldn't be opened/initialized during shard startup. -2. If shard error counter exceeds threshold. -3. If the metabase couldn't be reopened during SIGHUP handling. +A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
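The threshold in question is the engine-level `shard_ro_error_threshold` setting described in the storage node configuration reference. A minimal sketch, with an illustrative (non-default) value:

```yaml
storage:
  # Illustrative value: after 30 storage errors on a shard,
  # the shard automatically switches to read-only mode.
  shard_ro_error_threshold: 30
```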
# Detach shard diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 4a6e5ba6d..da9fdfed0 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -12,21 +12,23 @@ There are some custom types used for brevity: # Structure -| Section | Description | -|------------------------|---------------------------------------------------------------------| -| `logger` | [Logging parameters](#logger-section) | -| `pprof` | [PProf configuration](#pprof-section) | -| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | -| `control` | [Control service configuration](#control-section) | -| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | -| `morph` | [N3 blockchain client configuration](#morph-section) | -| `apiclient` | [FrostFS API client configuration](#apiclient-section) | -| `policer` | [Policer service configuration](#policer-section) | -| `replicator` | [Replicator service configuration](#replicator-section) | -| `storage` | [Storage engine configuration](#storage-section) | -| `runtime` | [Runtime configuration](#runtime-section) | -| `audit` | [Audit configuration](#audit-section) | - +| Section | Description | +|--------------|---------------------------------------------------------| +| `node` | [Node parameters](#node-section) | +| `logger` | [Logging parameters](#logger-section) | +| `pprof` | [PProf configuration](#pprof-section) | +| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | +| `control` | [Control service configuration](#control-section) | +| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | +| `morph` | [N3 blockchain client configuration](#morph-section) | +| `apiclient` | [FrostFS API client configuration](#apiclient-section) | +| `policer` | [Policer service configuration](#policer-section) | +| `replicator` | [Replicator service configuration](#replicator-section) | +| `storage` | [Storage engine configuration](#storage-section) | +| `runtime` | [Runtime configuration](#runtime-section) | +| `audit` | [Audit configuration](#audit-section) | +| `multinet` | [Multinet configuration](#multinet-section) | +| `qos` | [QoS configuration](#qos-section) | # `control` section ```yaml @@ -110,11 +112,21 @@ Contains logger parameters. ```yaml logger: level: info + tags: + - names: "main, morph" + level: debug ``` -| Parameter | Type | Default value | Description | -|-----------|----------|---------------|---------------------------------------------------------------------------------------------------| -| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| Parameter | Type | Default value | Description | +|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------| +| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tag descriptions. | + +## `tags` subsection +| Parameter | Type | Default value | Description | +|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `names` | `string` | | List of components separated by `,`.
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | +| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. | # `contracts` section Contains override values for FrostFS side-chain contract hashes. Most of the time contract @@ -147,15 +159,19 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 switch_interval: 2m + netmap: + candidates: + poll_interval: 20s ``` -| Parameter | Type | Default value | Description | -| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | -| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | -| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | -| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | -| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | +| Parameter | Type | Default value | Description | +|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | +| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | +| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | +| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | +| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | +| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval for polling netmap candidates and merging them with the netmap in the local cache. | ## `rpc_endpoint` subsection | Parameter | Type | Default value | Description | @@ -169,7 +185,6 @@ Local storage engine configuration. | Parameter | Type | Default value | Description | |----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------| -| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. | | `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. | | `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. | | `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. | @@ -180,20 +195,41 @@ Contains configuration for each shard. Keys must be consecutive numbers starting `default` subsection has the same format and specifies defaults for missing values. The following table describes configuration for each shard. -| Parameter | Type | Default value | Description | -| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `compress` | `bool` | `false` | Flag to enable compression. | -| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | -| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | -| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | -| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | -| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | -| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | -| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | -| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | -| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | -| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | -| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| Parameter | Type | Default value | Description | +| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- | +| `compression` | [Compression config](#compression-subsection) | | Compression config. | +| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | +| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | +| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | +| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | +| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | +| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | +| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | +| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | + +### `compression` subsection + +Contains compression config. + +```yaml +compression: + enabled: true + level: smallest_size + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 +``` + +| Parameter | Type | Default value | Description | +| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | `bool` | `false` | Flag to enable compression. | +| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. | +| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | +| `estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data or not. | +| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data will be compressed if the estimate is greater than this value. | ### `blobstor` subsection @@ -208,7 +244,7 @@ blobstor: width: 4 - type: fstree path: /path/to/blobstor/blobovnicza - perm: 0644 + perm: 0o644 size: 4194304 depth: 1 width: 4 @@ -268,7 +304,7 @@ gc: ```yaml metabase: path: /path/to/meta.db - perm: 0644 + perm: 0o644 max_batch_size: 200 max_batch_delay: 20ms ``` @@ -287,21 +323,78 @@ writecache: enabled: true path: /path/to/writecache capacity: 4294967296 - small_object_size: 16384 max_object_size: 134217728 flush_worker_count: 30 ``` -| Parameter | Type | Default value | Description | -|----------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------| -| `path` | `string` | | Path to the metabase file. | -| `capacity` | `size` | unrestricted | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. | -| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. | -| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. | -| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. 
| -| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. | -| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. | +| Parameter | Type | Default value | Description | +| --------------------------- | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `path` | `string` | | Path to the write-cache directory. | +| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. | +| `max_object_count` | `int` | unrestricted | Approximate maximum number of objects in the writecache. If the writecache is full, objects are written to the blobstor directly. | +| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. | +| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. | +| `max_flushing_objects_size` | `size` | `512M` | Maximum total size of objects being flushed in the background. | +### `limits` subsection + +```yaml +limits: + max_read_running_ops: 10000 + max_read_waiting_ops: 1000 + max_write_running_ops: 1000 + max_write_waiting_ops: 100 + read: + - tag: internal + weight: 20 + limit_ops: 0 + reserved_ops: 1000 + - tag: client + weight: 70 + reserved_ops: 10000 + - tag: background + weight: 5 + limit_ops: 10000 + reserved_ops: 0 + - tag: writecache + weight: 5 + limit_ops: 25000 + - tag: policer + weight: 5 + limit_ops: 25000 + write: + - tag: internal + weight: 200 + limit_ops: 0 + reserved_ops: 100 + - tag: client + weight: 700 + reserved_ops: 1000 + - tag: background + weight: 50 + limit_ops: 1000 + reserved_ops: 0 + - tag: writecache + weight: 50 + limit_ops: 2500 + - tag: policer + weight: 50 + limit_ops: 2500 +``` + +| Parameter | Type | Default value | Description | +| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- | +| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. | +| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. | +| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. | +| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. | +| `read` | `[]tag` | empty | Array of shard read settings for tags. | +| `write` | `[]tag` | empty | Array of shard write settings for tags. | +| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. | +| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. | +| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. | +| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. | +| `tag.prohibited` | `bool` | false | If true, operations with the specified tag will be prohibited. 
| # `node` section @@ -317,22 +410,22 @@ node: - "Price:11" - "UN-LOCODE:RU MSK" - "key:value" - relay: false persistent_sessions: path: /sessions persistent_state: path: /state + locode_db_path: "/path/to/locode/db" ``` -| Parameter | Type | Default value | Description | -|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------| -| `key` | `string` | | Path to the binary-encoded private key. | -| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | -| `addresses` | `[]string` | | Addresses advertised in the netmap. | -| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | -| `relay` | `bool` | | Enable relay mode. | -| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | -| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | +| Parameter | Type | Default value | Description | +|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------| +| `key` | `string` | | Path to the binary-encoded private key. | +| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | +| `addresses` | `[]string` | | Addresses advertised in the netmap. | +| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | +| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | +| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | +| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. | ## `wallet` subsection N3 wallet configuration. @@ -397,25 +490,46 @@ replicator: pool_size: 10 ``` -| Parameter | Type | Default value | Description | -|---------------|------------|----------------------------------------|---------------------------------------------| -| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | -| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. | +| Parameter | Type | Default value | Description | +|---------------|------------|---------------|---------------------------------------------| +| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | +| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. | # `object` section Contains object-service related parameters. ```yaml object: - put: - remote_pool_size: 100 + get: + priority: + - $attribute:ClusterName ``` -| Parameter | Type | Default value | Description | -|-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------| -| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | -| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. 
| -| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | +| Parameter | Type | Default value | Description | +|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------| +| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | +| `get.priority` | `[]string` | | List of node metrics used for prioritization when computing the response to GET requests. | + + +# `rpc` section +Contains limits on the number of active RPCs for the specified method(s). + +```yaml +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 +``` + +| Parameter | Type | Default value | Description | +|------------------|------------|---------------|--------------------------------------------------------------| +| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s) | +| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit | # `runtime` section Contains runtime parameters. @@ -437,6 +551,52 @@ audit: enabled: true ``` -| Parameter | Type | Default value | Description | -|---------------------|--------|---------------|---------------------------------------------------| -| `soft_memory_limit` | `bool` | false | If `true` then audit event logs will be recorded. | +| Parameter | Type | Default value | Description | +|-----------|--------|---------------|---------------------------------------------------| +| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. | + + +# `multinet` section +Contains multinet parameters. + +```yaml +multinet: + enabled: true + subnets: + - mask: 192.168.219.174/24 + source_ips: + - 192.168.218.185 + - 192.168.219.185 + - mask: 10.78.70.74/24 + source_ips: + - 10.78.70.185 + - 10.78.71.185 + balancer: roundrobin + restrict: false + fallback_delay: 350ms +``` + +| Parameter | Type | Default value | Description | +| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | `bool` | false | If `true` then source-based routing is enabled. | +| `subnets` | `subnet` | empty | Resulting subnets. | +| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | +| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | +| `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolution. 
| + +# `qos` section +```yaml +qos: + critical: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 +``` +| Parameter | Type | Default value | Description | +| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- | +| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. | +| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. | diff --git a/docs/update-go-instruction.md b/docs/update-go-instruction.md index f99225046..195e0c6b3 100644 --- a/docs/update-go-instruction.md +++ b/docs/update-go-instruction.md @@ -7,7 +7,7 @@ ## Update CI Change Golang versions for unit test in CI. -There is `go` section in `.github/workflows/go.yaml` file: +There is `go` section in `.forgejo/workflows/*.yml` files: ```yaml jobs: test: diff --git a/go.mod b/go.mod index ee8b1bb16..6f1950936 100644 --- a/go.mod +++ b/go.mod @@ -1,91 +1,96 @@ module git.frostfs.info/TrueCloudLab/frostfs-node -go 1.21 +go 1.23.0 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e + git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de + git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa git.frostfs.info/TrueCloudLab/hrw v1.2.1 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984 + git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 + github.com/VictoriaMetrics/easyproto v0.1.4 github.com/cheggaaa/pb v1.0.29 github.com/chzyer/readline v1.5.1 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/felixge/fgprof v0.9.5 github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 + github.com/gdamore/tcell/v2 v2.7.4 github.com/go-pkgz/expirable-cache/v3 v3.0.0 github.com/google/uuid v1.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.4 - github.com/mitchellh/go-homedir v1.1.0 + github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.12.1 - github.com/nspcc-dev/neo-go v0.106.0 + github.com/multiformats/go-multiaddr v0.15.0 + github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 - github.com/paulmach/orb v0.11.0 github.com/prometheus/client_golang v1.19.0 + 
github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 github.com/spf13/cast v1.6.0 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.18.2 + github.com/spf13/viper v1.19.0 github.com/ssgreg/journald v1.0.0 github.com/stretchr/testify v1.9.0 - go.etcd.io/bbolt v1.3.9 - go.opentelemetry.io/otel v1.22.0 - go.opentelemetry.io/otel/trace v1.22.0 + go.etcd.io/bbolt v1.3.10 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240119083558-1b970713d09a - golang.org/x/sync v0.6.0 - golang.org/x/sys v0.18.0 - golang.org/x/term v0.18.0 - google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.33.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 + golang.org/x/term v0.30.0 + google.golang.org/grpc v1.69.2 + google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - go.mongodb.org/mongo-driver v1.13.1 // indirect ) require ( git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.13.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davidmz/go-pageant v1.0.2 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/gdamore/encoding v1.0.0 // indirect github.com/go-fed/httpsig v1.1.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/klauspost/reedsolomon v1.12.1 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/minio/sha256-simd v1.0.1 // 
indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -96,14 +101,14 @@ require ( github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect - github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect + github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect github.com/nspcc-dev/rfc6979 v0.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/rivo/uniseg v0.4.4 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.11.0 // indirect @@ -111,21 +116,22 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/twmb/murmur3 v1.1.8 // indirect github.com/urfave/cli v1.22.14 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.22.0 // indirect - go.opentelemetry.io/otel/sdk v1.22.0 // indirect - go.opentelemetry.io/proto/otlp v1.1.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.4.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18 +replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 diff --git a/go.sum b/go.sum index c7c3b87eb..5b075f60a 100644 --- a/go.sum +++ b/go.sum @@ -1,21 +1,25 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3 
h1:H5GvrVlowIMWfzqQkhY0p0myooJxQ1sMRVSFfXawwWg= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de h1:OjsWY0jpGJV1t87XgwL/3PsDx7fJ6lfNMXtY8UhoUbM= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de/go.mod h1:4AObM67VUqkXQJlODTFThFnuMGEuK8h9DrAXHDZqvCU= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= -git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18 h1:JRjwcHaQajTbSCBCK3yZnqvyHvgWBaoThDGuT4kvIIc= -git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984 h1:O3F2Grz07RWZ68mRz1xsYsNPNvZLwY00BM+xoYb1kNk= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA= +git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= +git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= +git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= 
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= @@ -23,18 +27,23 @@ git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4= git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc= +github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -49,8 +58,8 @@ github.com/consensys/bavard v0.1.13/go.mod 
h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 h1:tYj5Ydh5D7Xg2R1tJnoG36Yta7NVB8C0vx36oPA3Bbw= github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -62,6 +71,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI= github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -71,17 +82,23 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU= +github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg= github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw= github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec= 
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -91,17 +108,19 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -111,8 +130,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tg github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= 
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -123,30 +142,28 @@ github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= 
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -164,22 +181,19 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= -github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= +github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= +github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= @@ -188,8 +202,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk= github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY= +github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q= 
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY= github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM= github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -208,14 +222,11 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo= github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= -github.com/paulmach/orb v0.11.0 h1:JfVXJUBeH9ifc/OrhBY0lL16QsmPgpCHMlqSSYhcgAA= -github.com/paulmach/orb v0.11.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= -github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -227,15 +238,18 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 h1:o1CYtoFOm6xJK3DvDAEG5wDJPLj+SoxUtUDFaQgt1iY= +github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= 
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= +github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= @@ -246,19 +260,20 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU= github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -271,41 +286,32 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= -go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= -go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= -go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= -go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 h1:zr8ymM5OWWjjiWRzwTfZ67c905+2TMHYp2lMJ52QTyM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0/go.mod h1:sQs7FT2iLVJ+67vYngGJkPe1qr39IzaBzaj9IDNNY8k= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= -go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -317,44 +323,38 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -377,48 +377,47 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.19.0 
h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -427,11 +426,9 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf 
v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -448,7 +445,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
-lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
+lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
 rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
 rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/internal/assert/cond.go b/internal/assert/cond.go
new file mode 100644
index 000000000..113d2eba9
--- /dev/null
+++ b/internal/assert/cond.go
@@ -0,0 +1,29 @@
+package assert
+
+import (
+	"fmt"
+	"strings"
+)
+
+func True(cond bool, details ...string) {
+	if !cond {
+		panic(strings.Join(details, " "))
+	}
+}
+
+func False(cond bool, details ...string) {
+	if cond {
+		panic(strings.Join(details, " "))
+	}
+}
+
+func NoError(err error, details ...string) {
+	if err != nil {
+		content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
+		panic(content)
+	}
+}
+
+func Fail(details ...string) {
+	panic(strings.Join(details, " "))
+}
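The new internal/assert package above trades returned errors for panics: it is meant for internal invariants ("must never happen" conditions), not for ordinary error handling, and NoError prefixes the panic message with "BUG:" to make that explicit. A minimal usage sketch; the call site below is hypothetical and not part of this change:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)

// payloadLength decodes a little-endian length prefix. The assert documents
// and enforces the caller-side invariant instead of returning an error.
func payloadLength(header []byte) int {
	assert.True(len(header) >= 4, "header shorter than 4 bytes")
	return int(header[0]) | int(header[1])<<8 | int(header[2])<<16 | int(header[3])<<24
}

func main() {
	fmt.Println(payloadLength([]byte{1, 0, 0, 0})) // 1
}
```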
diff --git a/internal/audit/request.go b/internal/audit/request.go
index cf0797300..17666ab4b 100644
--- a/internal/audit/request.go
+++ b/internal/audit/request.go
@@ -1,10 +1,12 @@
 package audit
 
 import (
-	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+	"context"
+
 	crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"go.uber.org/zap"
 )
@@ -17,15 +19,15 @@ type Target interface {
 	String() string
 }
 
-func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) {
+func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
 	var key []byte
 
 	if req != nil {
 		key = req.GetVerificationHeader().GetBodySignature().GetKey()
 	}
 
-	LogRequestWithKey(log, operation, key, target, status)
+	LogRequestWithKey(ctx, log, operation, key, target, status)
 }
 
-func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) {
+func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
 	object, subject := NotDefined, NotDefined
 
 	publicKey := crypto.UnmarshalPublicKey(key)
@@ -37,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target
 		object = target.String()
 	}
 
-	log.Info(logs.AuditEventLogRecord,
+	log.Info(ctx, logs.AuditEventLogRecord,
 		zap.String("operation", operation),
 		zap.String("object", object),
 		zap.String("subject", subject),
diff --git a/internal/audit/target.go b/internal/audit/target.go
index 8bc87ee8e..2d6881e29 100644
--- a/internal/audit/target.go
+++ b/internal/audit/target.go
@@ -3,7 +3,7 @@ package audit
 import (
 	"strings"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )
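internal/audit now threads context.Context through LogRequest and LogRequestWithKey (and the logger's Info takes ctx first), so the audit record can pick up per-request data such as trace identifiers. A sketch of the post-change call shape, using a throwaway Target implementation; the wrapper function and its names are hypothetical, and real call sites use the helpers from internal/audit/target.go:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

// addressTarget satisfies audit.Target (only String() is required).
type addressTarget struct{ addr string }

func (t addressTarget) String() string { return t.addr }

// logDelete shows the new call shape: the request context comes first,
// key is the raw verification-header public key, status reports whether
// the operation was allowed/succeeded.
func logDelete(ctx context.Context, log *logger.Logger, key []byte) {
	audit.LogRequestWithKey(ctx, log, "ObjectDelete", key, addressTarget{addr: "<cid>/<oid>"}, true)
}
```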
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 67f173f29..626372f43 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -14,13 +14,9 @@ const (
 	InterruptPlacementIterationByContext = "interrupt placement iteration by context"
 
 	Notification = "notification"
-
-	SkipDeprecatedNotification = "skip deprecated notification"
 )
 
 const (
-	InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations"
-	InnerringCantStopEpochEstimation = "can't stop epoch estimation"
 	InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
 	InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
 	InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
@@ -41,8 +37,6 @@ const (
 	InnerringCantUpdatePersistentState = "can't update persistent state"
 	InnerringCloserError = "closer error"
 	InnerringReadConfigFromBlockchain = "read config from blockchain"
-	NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
-	NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
 	PolicerCouldNotGetContainer = "could not get container"
 	PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
 	PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
@@ -61,7 +55,6 @@ const (
 	ReplicatorCouldNotReplicateObject = "could not replicate object"
 	ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
 	TreeRedirectingTreeServiceQuery = "redirecting tree service query"
-	TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
 	TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
 	TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
 	TreeSynchronizeTree = "synchronize tree"
@@ -107,7 +100,6 @@ const (
 	GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed"
 	GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
 	GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
-	GetUnableToGetECObjectContainer = "unable to get container for erasure-coded object"
 	GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
 	GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
 	GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
@@ -133,7 +125,6 @@ const (
 	SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
 	SearchLocalOperationFailed = "local operation failed"
 	UtilObjectServiceError = "object service error"
-	UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
 	V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
 	V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
 	ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
@@ -148,14 +139,12 @@ const (
 	ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked"
 	ClientNotaryRequestInvoked = "notary request invoked"
 	ClientNotaryDepositTransactionWasSuccessfullyPersisted = "notary deposit transaction was successfully persisted"
-	ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted = "attempt to wait for notary deposit transaction to get persisted"
 	ClientNeoClientInvoke = "neo client invoke"
 	ClientNativeGasTransferInvoke = "native gas transfer invoke"
 	ClientBatchGasTransferInvoke = "batch gas transfer invoke"
 	ClientCantGetBlockchainHeight = "can't get blockchain height"
 	ClientCantGetBlockchainHeight243 = "can't get blockchain height"
 	EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
-	EventCouldNotStartListenToEvents = "could not start listen to events"
 	EventStopEventListenerByError = "stop event listener by error"
 	EventStopEventListenerByContext = "stop event listener by context"
 	EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
@@ -173,17 +162,9 @@ const (
 	EventNotaryParserNotSet = "notary parser not set"
 	EventCouldNotParseNotaryEvent = "could not parse notary event"
 	EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
-	EventIgnoreNilEventParser = "ignore nil event parser"
-	EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
 	EventRegisteredNewEventParser = "registered new event parser"
-	EventIgnoreNilEventHandler = "ignore nil event handler"
-	EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
 	EventRegisteredNewEventHandler = "registered new event handler"
-	EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
-	EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
-	EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
 	EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
-	EventIgnoreNilBlockHandler = "ignore nil block handler"
 	StorageOperation = "local object storage operation"
 	BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
 	BlobovniczaOpeningBoltDB = "opening BoltDB"
@@ -217,6 +198,7 @@ const (
 	EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks"
 	EngineInterruptGettingLockers = "can't get object's lockers"
 	EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks"
+	EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones"
 	EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only"
 	EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode"
 	EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold"
@@ -226,12 +208,6 @@ const (
 	EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
 	EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
 	EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
-	MetabaseMissingMatcher = "missing matcher"
-	MetabaseErrorInFKBTSelection = "error in FKBT selection"
-	MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf"
-	MetabaseUnknownOperation = "unknown operation"
-	MetabaseCantIterateOverTheBucket = "can't iterate over the bucket"
-	MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
 	MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
 	MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
 	MetabaseCheckingMetabaseVersion = "checking metabase version"
@@ -249,6 +225,7 @@ const (
 	ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
 	ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
 	ShardCouldNotUnmarshalObject = "could not unmarshal object"
+	ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
 	ShardCouldNotCloseShardComponent = "could not close shard component"
 	ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
 	ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
@@ -276,9 +253,8 @@ const (
 	ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
 	ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
 	ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
-	WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
+	ShardCouldNotFindObject = "could not find object"
 	WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
-	WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
 	WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
 	BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
 	BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
@@ -313,9 +289,6 @@ const (
 	ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
 	ContainerDeleteContainerCheckFailed = "delete container check failed"
 	ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
-	ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
-	ContainerSetEACLCheckFailed = "set EACL check failed"
-	ContainerCouldNotApproveSetEACL = "could not approve set EACL"
 	FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
 	FrostFSCantRelaySetConfigEvent = "can't relay set config event"
 	FrostFSFrostfsWorkerPool = "frostfs worker pool"
@@ -360,7 +333,6 @@ const (
 	NetmapCantGetTransactionHeight = "can't get transaction height"
 	NetmapCantResetEpochTimer = "can't reset epoch timer"
 	NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
-	NetmapCantStartContainerSizeEstimation = "can't start container size estimation"
 	NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
 	NetmapNextEpoch = "next epoch"
 	NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
@@ -412,7 +384,6 @@ const (
 	FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
 	FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
 	FrostFSNodeConfigurationReading = "configuration reading"
-	FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
 	FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
 	FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
 	FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
@@ -421,11 +392,6 @@ const (
 	FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
 	FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
 	FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
-	FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
-	FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
-	FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
-	FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
-	FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
 	FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
 	FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
 	FrostFSNodeFailedInitTracing = "failed init tracing"
@@ -469,7 +435,6 @@ const (
 	FSTreeCantUnmarshalObject = "can't unmarshal an object"
 	FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
 	FSTreeCantUpdateID = "can't update object storage ID"
-	FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
 	PutSingleRedirectFailure = "failed to redirect PutSingle request"
 	StorageIDRetrievalFailure = "can't get storage ID from metabase"
 	ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
@@ -539,4 +504,18 @@ const (
 	PolicerCouldNotGetChunk = "could not get EC chunk"
 	PolicerCouldNotGetChunks = "could not get EC chunks"
 	AuditEventLogRecord = "audit event log record"
+	StartedWritecacheSealAsync = "started writecache seal async"
+	WritecacheSealCompletedAsync = "writecache seal completed successfully"
+	FailedToSealWritecacheAsync = "failed to seal writecache async"
+	WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
+	BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
+	WritecacheCantGetObject = "can't get an object from fstree"
+	FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
+	FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
+	NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
+	FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
+	FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
+	WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
+	FailedToUpdateNetmapCandidates = "update netmap candidates failed"
+	UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used"
 )
information about the application.", }, []string{"version"}), } appInfo.versionValue.With(prometheus.Labels{"version": version}) diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go index 3aa51c0f0..9123541ff 100644 --- a/internal/metrics/consts.go +++ b/internal/metrics/consts.go @@ -22,6 +22,8 @@ const ( grpcServerSubsystem = "grpc_server" policerSubsystem = "policer" commonCacheSubsystem = "common_cache" + multinetSubsystem = "multinet" + qosSubsystem = "qos" successLabel = "success" shardIDLabel = "shard_id" @@ -41,6 +43,8 @@ const ( endpointLabel = "endpoint" hitLabel = "hit" cacheLabel = "cache" + sourceIPLabel = "source_ip" + ioTagLabel = "io_tag" readWriteMode = "READ_WRITE" readOnlyMode = "READ_ONLY" diff --git a/internal/metrics/engine.go b/internal/metrics/engine.go index e37777e40..1d01c95ed 100644 --- a/internal/metrics/engine.go +++ b/internal/metrics/engine.go @@ -27,6 +27,7 @@ type EngineMetrics interface { IncRefillObjectsCount(shardID, path string, size int, success bool) SetRefillPercent(shardID, path string, percent uint32) SetRefillStatus(shardID, path, status string) + SetEvacuationInProgress(shardID string, value bool) WriteCache() WriteCacheMetrics GC() GCMetrics @@ -45,6 +46,7 @@ type engineMetrics struct { refillObjCounter *prometheus.GaugeVec refillPayloadCounter *prometheus.GaugeVec refillPercentCounter *prometheus.GaugeVec + evacuationInProgress *shardIDModeValue gc *gcMetrics writeCache *writeCacheMetrics @@ -72,6 +74,7 @@ func newEngineMetrics() *engineMetrics { refillObjCounter: newEngineGaugeVector("resync_metabase_objects_total", "Count of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}), refillPayloadCounter: newEngineGaugeVector("resync_metabase_objects_size_bytes", "Size of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}), refillPercentCounter: newEngineGaugeVector("resync_metabase_complete_percent", "Percent of resynced from blobstore to metabase completeness", []string{shardIDLabel, pathLabel}), + evacuationInProgress: newShardIDMode(engineSubsystem, "evacuation_in_progress", "Shard evacuation in progress"), } } @@ -124,6 +127,7 @@ func (m *engineMetrics) DeleteShardMetrics(shardID string) { m.refillPercentCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) m.mode.Delete(shardID) m.refillStatus.DeleteByShardID(shardID) + m.evacuationInProgress.Delete(shardID) } func (m *engineMetrics) AddToObjectCounter(shardID, objectType string, delta int) { @@ -213,3 +217,7 @@ func (m *engineMetrics) SetRefillPercent(shardID, path string, percent uint32) { func (m *engineMetrics) SetRefillStatus(shardID, path, status string) { m.refillStatus.SetMode(shardID, path, status) } + +func (m *engineMetrics) SetEvacuationInProgress(shardID string, value bool) { + m.evacuationInProgress.SetMode(shardID, strconv.FormatBool(value)) +} diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go index f6b14a632..d0cb8131f 100644 --- a/internal/metrics/innerring.go +++ b/internal/metrics/innerring.go @@ -17,7 +17,9 @@ type InnerRingServiceMetrics struct { eventDuration *prometheus.HistogramVec morphCacheMetrics *morphCacheMetrics logMetrics logger.LogMetrics - appInfo *ApplicationInfo + multinet *multinetMetrics + // nolint: unused + appInfo *ApplicationInfo } // NewInnerRingMetrics returns new instance of metrics collectors for inner ring. 
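With `Namespace` now set on the gauge above, the Prometheus client composes the exported family name as `<namespace>_<subsystem>_<name>`, which is also how the new `multinet` and `qos` subsystem constants surface in metric names. A minimal sketch of the naming rule, assuming a "frostfs_node" namespace (the real constant lives in internal/metrics/consts.go and is not shown in this diff):

package main

import "github.com/prometheus/client_golang/prometheus"

// Registered as frostfs_node_multinet_dial_count (namespace value assumed).
var dials = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "frostfs_node", // assumption; see internal/metrics/consts.go
	Subsystem: "multinet",
	Name:      "dial_count",
}, []string{"source_ip", "success"})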
@@ -50,6 +52,7 @@ func NewInnerRingMetrics() *InnerRingServiceMetrics { morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace), appInfo: NewApplicationInfo(misc.Version), logMetrics: logger.NewLogMetrics(innerRingNamespace), + multinet: newMultinetMetrics(innerRingNamespace), } } @@ -77,3 +80,7 @@ func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics { func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics { return m.logMetrics } + +func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics { + return m.multinet +} diff --git a/internal/metrics/multinet.go b/internal/metrics/multinet.go new file mode 100644 index 000000000..6b1f99d46 --- /dev/null +++ b/internal/metrics/multinet.go @@ -0,0 +1,35 @@ +package metrics + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type multinetMetrics struct { + dials *prometheus.GaugeVec +} + +type MultinetMetrics interface { + Dial(sourceIP string, success bool) +} + +func newMultinetMetrics(ns string) *multinetMetrics { + return &multinetMetrics{ + dials: metrics.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: multinetSubsystem, + Name: "dial_count", + Help: "Dials count performed by multinet", + }, []string{sourceIPLabel, successLabel}), + } +} + +func (m *multinetMetrics) Dial(sourceIP string, success bool) { + m.dials.With(prometheus.Labels{ + sourceIPLabel: sourceIP, + successLabel: strconv.FormatBool(success), + }).Inc() +} diff --git a/internal/metrics/node.go b/internal/metrics/node.go index d9e401446..8ade19eb2 100644 --- a/internal/metrics/node.go +++ b/internal/metrics/node.go @@ -25,7 +25,10 @@ type NodeMetrics struct { morphClient *morphClientMetrics morphCache *morphCacheMetrics log logger.LogMetrics - appInfo *ApplicationInfo + multinet *multinetMetrics + qos *QoSMetrics + // nolint: unused + appInfo *ApplicationInfo } func NewNodeMetrics() *NodeMetrics { @@ -52,6 +55,8 @@ func NewNodeMetrics() *NodeMetrics { morphCache: newMorphCacheMetrics(namespace), log: logger.NewLogMetrics(namespace), appInfo: NewApplicationInfo(misc.Version), + multinet: newMultinetMetrics(namespace), + qos: newQoSMetrics(), } } @@ -119,3 +124,11 @@ func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics { func (m *NodeMetrics) LogMetrics() logger.LogMetrics { return m.log } + +func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { + return m.multinet +} + +func (m *NodeMetrics) QoSMetrics() *QoSMetrics { + return m.qos +} diff --git a/internal/metrics/object.go b/internal/metrics/object.go index 0ba994ed3..e4f6dfde1 100644 --- a/internal/metrics/object.go +++ b/internal/metrics/object.go @@ -9,13 +9,14 @@ import ( ) type ObjectServiceMetrics interface { - AddRequestDuration(method string, d time.Duration, success bool) + AddRequestDuration(method string, d time.Duration, success bool, ioTag string) AddPayloadSize(method string, size int) } type objectServiceMetrics struct { - methodDuration *prometheus.HistogramVec - payloadCounter *prometheus.CounterVec + methodDuration *prometheus.HistogramVec + payloadCounter *prometheus.CounterVec + ioTagOpsCounter *prometheus.CounterVec } func newObjectServiceMetrics() *objectServiceMetrics { @@ -32,14 +33,24 @@ func newObjectServiceMetrics() *objectServiceMetrics { Name: "request_payload_bytes", Help: "Object Service request payload", }, []string{methodLabel}), + ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: 
objectSubsystem,
+		Name:      "requests_total",
+		Help:      "Count of requests for each IO tag",
+	}, []string{methodLabel, ioTagLabel}),
 	}
 }
 
-func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
+func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
 	m.methodDuration.With(prometheus.Labels{
 		methodLabel:  method,
 		successLabel: strconv.FormatBool(success),
 	}).Observe(d.Seconds())
+	m.ioTagOpsCounter.With(prometheus.Labels{
+		ioTagLabel:  ioTag,
+		methodLabel: method,
+	}).Inc()
 }
 
 func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {
diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go
new file mode 100644
index 000000000..be6878142
--- /dev/null
+++ b/internal/metrics/qos.go
@@ -0,0 +1,52 @@
+package metrics
+
+import (
+	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type QoSMetrics struct {
+	opsCounter *prometheus.GaugeVec
+}
+
+func newQoSMetrics() *QoSMetrics {
+	return &QoSMetrics{
+		opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: qosSubsystem,
+			Name:      "operations_total",
+			Help:      "Count of pending, in-progress, completed and resource-exhausted operations for each shard",
+		}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
+	}
+}
+
+func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
+	m.opsCounter.With(prometheus.Labels{
+		shardIDLabel:   shardID,
+		operationLabel: operation,
+		ioTagLabel:     tag,
+		typeLabel:      "pending",
+	}).Set(float64(pending))
+	m.opsCounter.With(prometheus.Labels{
+		shardIDLabel:   shardID,
+		operationLabel: operation,
+		ioTagLabel:     tag,
+		typeLabel:      "in_progress",
+	}).Set(float64(inProgress))
+	m.opsCounter.With(prometheus.Labels{
+		shardIDLabel:   shardID,
+		operationLabel: operation,
+		ioTagLabel:     tag,
+		typeLabel:      "completed",
+	}).Set(float64(completed))
+	m.opsCounter.With(prometheus.Labels{
+		shardIDLabel:   shardID,
+		operationLabel: operation,
+		ioTagLabel:     tag,
+		typeLabel:      "resource_exhausted",
+	}).Set(float64(resourceExhausted))
+}
+
+func (m *QoSMetrics) Close(shardID string) {
+	m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+}
diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go
index 6702aa83c..e192c4398 100644
--- a/internal/metrics/treeservice.go
+++ b/internal/metrics/treeservice.go
@@ -12,12 +12,14 @@ type TreeMetricsRegister interface {
 	AddReplicateTaskDuration(time.Duration, bool)
 	AddReplicateWaitDuration(time.Duration, bool)
 	AddSyncDuration(time.Duration, bool)
+	AddOperation(string, string)
 }
 
 type treeServiceMetrics struct {
 	replicateTaskDuration *prometheus.HistogramVec
 	replicateWaitDuration *prometheus.HistogramVec
 	syncOpDuration        *prometheus.HistogramVec
+	ioTagOpsCounter       *prometheus.CounterVec
 }
 
 var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
@@ -42,6 +44,12 @@ func newTreeServiceMetrics() *treeServiceMetrics {
 			Name:      "sync_duration_seconds",
 			Help:      "Duration of synchronization operations",
 		}, []string{successLabel}),
+		ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: treeServiceSubsystem,
+			Name:      "requests_total",
+			Help:      "Count of requests for each IO tag",
+		}, []string{methodLabel, ioTagLabel}),
 	}
 }
 
@@ -62,3 +70,10 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration,
success bool) {
 		successLabel: strconv.FormatBool(success),
 	}).Observe(d.Seconds())
 }
+
+func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
+	m.ioTagOpsCounter.With(prometheus.Labels{
+		ioTagLabel:  ioTag,
+		methodLabel: op,
+	}).Inc()
+}
diff --git a/internal/net/config.go b/internal/net/config.go
new file mode 100644
index 000000000..b84ac3b35
--- /dev/null
+++ b/internal/net/config.go
@@ -0,0 +1,69 @@
+package net
+
+import (
+	"errors"
+	"fmt"
+	"net/netip"
+	"slices"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+	"git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var errEmptySourceIPList = errors.New("empty source IP list")
+
+type Subnet struct {
+	Prefix    string
+	SourceIPs []string
+}
+
+type Config struct {
+	Enabled       bool
+	Subnets       []Subnet
+	Balancer      string
+	Restrict      bool
+	FallbackDelay time.Duration
+	Metrics       metrics.MultinetMetrics
+}
+
+func (c Config) toMultinetConfig() (multinet.Config, error) {
+	var subnets []multinet.Subnet
+	for _, s := range c.Subnets {
+		var ms multinet.Subnet
+		p, err := netip.ParsePrefix(s.Prefix)
+		if err != nil {
+			return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
+		}
+		ms.Prefix = p
+		for _, ip := range s.SourceIPs {
+			addr, err := netip.ParseAddr(ip)
+			if err != nil {
+				return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
+			}
+			ms.SourceIPs = append(ms.SourceIPs, addr)
+		}
+		if len(ms.SourceIPs) == 0 {
+			return multinet.Config{}, errEmptySourceIPList
+		}
+		subnets = append(subnets, ms)
+	}
+	return multinet.Config{
+		Subnets:       subnets,
+		Balancer:      multinet.BalancerType(c.Balancer),
+		Restrict:      c.Restrict,
+		FallbackDelay: c.FallbackDelay,
+		Dialer:        newDefaultDialer(),
+		EventHandler:  newEventHandler(c.Metrics),
+	}, nil
+}
+
+func (c Config) equals(other Config) bool {
+	return c.Enabled == other.Enabled &&
+		slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
+			return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
+		}) &&
+		c.Balancer == other.Balancer &&
+		c.Restrict == other.Restrict &&
+		c.FallbackDelay == other.FallbackDelay
+}
diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go
new file mode 100644
index 000000000..6265f1860
--- /dev/null
+++ b/internal/net/dial_target.go
@@ -0,0 +1,54 @@
+// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
+
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package net
+
+import (
+	"net/url"
+	"strings"
+)
+
+// parseDialTarget returns the network and address to pass to dialer.
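+//
+// Illustrative examples of the mapping implemented below (not exhaustive):
+//
+//	parseDialTarget("127.0.0.1:8080")     // -> ("tcp", "127.0.0.1:8080")
+//	parseDialTarget("unix:/tmp/app.sock") // -> ("unix", "/tmp/app.sock")
+//	parseDialTarget("unix:app.sock")      // -> ("unix", "app.sock")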
+func parseDialTarget(target string) (string, string) {
+	net := "tcp"
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+	// handle unix:addr which will fail with url.Parse
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			return n, target[m1+1:]
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr := t.Path
+		if scheme == "unix" {
+			if addr == "" {
+				addr = t.Host
+			}
+			return scheme, addr
+		}
+	}
+	return net, target
+}
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
new file mode 100644
index 000000000..daf0f815f
--- /dev/null
+++ b/internal/net/dialer.go
@@ -0,0 +1,39 @@
+package net
+
+import (
+	"context"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+type Dialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
+	return d.DialContext(ctx, "tcp", address)
+}
+
+func newDefaultDialer() net.Dialer {
+	// From `grpc.WithContextDialer` comment:
+	//
+	// Note: All supported releases of Go (as of December 2023) override the OS
+	// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+	// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+	// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
	// option to true from the Control field. For a concrete example of how to do
+	// this, see internal.NetDialerWithTCPKeepalive().
+	//
+	// https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
+	return net.Dialer{
+		KeepAlive: time.Duration(-1),
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go
new file mode 100644
index 000000000..3d94dedc7
--- /dev/null
+++ b/internal/net/dialer_source.go
@@ -0,0 +1,83 @@
+package net
+
+import (
+	"context"
+	"net"
+	"sync"
+
+	"git.frostfs.info/TrueCloudLab/multinet"
+)
+
+type DialerSource struct {
+	guard sync.RWMutex
+
+	c Config
+
+	md multinet.Dialer
+}
+
+func NewDialerSource(c Config) (*DialerSource, error) {
+	result := &DialerSource{}
+	if err := result.build(c); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+func (s *DialerSource) build(c Config) error {
+	if c.Enabled {
+		mc, err := c.toMultinetConfig()
+		if err != nil {
+			return err
+		}
+		md, err := multinet.NewDialer(mc)
+		if err != nil {
+			return err
+		}
+		s.md = md
+		s.c = c
+		return nil
+	}
+	s.md = nil
+	s.c = c
+	return nil
+}
+
+// GrpcContextDialer returns grpc.WithContextDialer func.
+// Returns nil if multinet disabled.
+func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
+	s.guard.RLock()
+	defer s.guard.RUnlock()
+
+	if s.c.Enabled {
+		return func(ctx context.Context, address string) (net.Conn, error) {
+			network, address := parseDialTarget(address)
+			return s.md.DialContext(ctx, network, address)
+		}
+	}
+	return nil
+}
+
+// NetContextDialer returns net.DialContext dial function.
+// Returns nil if multinet disabled.
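+//
+// A minimal usage sketch (the http.Transport wiring is illustrative only,
+// not part of this change):
+//
+//	ds, err := NewDialerSource(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	// A nil DialContext is valid for http.Transport: the default dialer is used.
+//	transport := &http.Transport{DialContext: ds.NetContextDialer()}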
+func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) { + s.guard.RLock() + defer s.guard.RUnlock() + + if s.c.Enabled { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return s.md.DialContext(ctx, network, address) + } + } + return nil +} + +func (s *DialerSource) Update(c Config) error { + s.guard.Lock() + defer s.guard.Unlock() + + if s.c.equals(c) { + return nil + } + return s.build(c) +} diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go new file mode 100644 index 000000000..024e5cf7c --- /dev/null +++ b/internal/net/event_handler.go @@ -0,0 +1,29 @@ +package net + +import ( + "net" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" + "git.frostfs.info/TrueCloudLab/multinet" +) + +var _ multinet.EventHandler = (*metricsEventHandler)(nil) + +type metricsEventHandler struct { + m metrics.MultinetMetrics +} + +func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) { + sourceIPString := "undefined" + if sourceIP != nil { + sourceIPString = sourceIP.Network() + "://" + sourceIP.String() + } + m.m.Dial(sourceIPString, err == nil) +} + +func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler { + if m == nil { + return nil + } + return &metricsEventHandler{m: m} +} diff --git a/internal/qos/config.go b/internal/qos/config.go new file mode 100644 index 000000000..d90b403b5 --- /dev/null +++ b/internal/qos/config.go @@ -0,0 +1,31 @@ +package qos + +import ( + "math" + "time" +) + +const ( + NoLimit int64 = math.MaxInt64 + DefaultIdleTimeout = 5 * time.Minute +) + +type LimiterConfig struct { + Read OpConfig + Write OpConfig +} + +type OpConfig struct { + MaxWaitingOps int64 + MaxRunningOps int64 + IdleTimeout time.Duration + Tags []IOTagConfig +} + +type IOTagConfig struct { + Tag string + Weight *float64 + LimitOps *float64 + ReservedOps *float64 + Prohibited bool +} diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go new file mode 100644 index 000000000..58cd9e52c --- /dev/null +++ b/internal/qos/grpc.go @@ -0,0 +1,86 @@ +package qos + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "google.golang.org/grpc" +) + +func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String()) + return handler(ctx, req) + } +} + +func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return invoker(ctx, method, req, reply, cc, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag.IsLocal() { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return invoker(ctx, method, req, reply, cc, opts...) 
+ } +} + +func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return streamer(ctx, desc, cc, method, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag.IsLocal() { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return streamer(ctx, desc, cc, method, opts...) + } +} + +func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() { + return handler(ctx, req) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return nil, new(apistatus.ResourceExhausted) + } + defer release() + + return handler(ctx, req) + } +} + +//nolint:contextcheck (grpc.ServerStream manages the context itself) +func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() { + return handler(srv, ss) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return new(apistatus.ResourceExhausted) + } + defer release() + + return handler(srv, ss) + } +} diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go new file mode 100644 index 000000000..7d0826754 --- /dev/null +++ b/internal/qos/grpc_test.go @@ -0,0 +1,219 @@ +package qos_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +const ( + okKey = "ok" +) + +var ( + errTest = errors.New("mock") + errWrongTag = errors.New("wrong tag") + errNoTag = errors.New("failed to get tag from context") + errResExhausted *apistatus.ResourceExhausted + tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync} +) + +type mockGRPCServerStream struct { + grpc.ServerStream + + ctx context.Context +} + +func (m *mockGRPCServerStream) Context() context.Context { + return m.ctx +} + +type limiter struct { + acquired bool + released bool +} + +func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) { + l.acquired = true + if key != okKey { + return nil, false + } + return func() { l.released = true }, true +} + +func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(ctx context.Context, req any) (any, error) { + return nil, errTest + } + _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler) + return err +} + +func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := 
qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(srv any, stream grpc.ServerStream) error { + return errTest + } + err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{ + FullMethod: methodName, + }, handler) + return err +} + +func Test_MaxActiveRPCLimiter(t *testing.T) { + // UnaryServerInterceptor + t.Run("unary fail", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("unary pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := unaryMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("unary pass", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) + // StreamServerInterceptor + t.Run("stream fail", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := streamMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) +} + +func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { + interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor() + called := false + handler := func(ctx context.Context, req any) (any, error) { + called = true + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() { + return nil, nil + } + return nil, errWrongTag + } + _, err := interceptor(context.Background(), nil, nil, handler) + require.NoError(t, err) + require.True(t, called) +} + +func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor() + + // check context with no value + called := false + invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + called = true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil + } + require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil)) + require.True(t, called) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return errNoTag + } + if raw != targetTag { + return errWrongTag + } + return nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + require.NoError(t, 
interceptor(ctx, "", nil, nil, nil, invoker, nil)) + } + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) +} + +func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor() + + // check context with no value + called := false + streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + called = true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil, nil + } + _, err := interceptor(context.Background(), nil, nil, "", streamer, nil) + require.True(t, called) + require.NoError(t, err) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return nil, errNoTag + } + if raw != targetTag { + return nil, errWrongTag + } + return nil, nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + _, err := interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) + } + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + _, err = interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) +} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go new file mode 100644 index 000000000..2d7de32fc --- /dev/null +++ b/internal/qos/limiter.go @@ -0,0 +1,246 @@ +package qos + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" +) + +const ( + defaultIdleTimeout time.Duration = 0 + defaultShare float64 = 1.0 + minusOne = ^uint64(0) + + defaultMetricsCollectTimeout = 5 * time.Second +) + +type ReleaseFunc scheduling.ReleaseFunc + +type Limiter interface { + ReadRequest(context.Context) (ReleaseFunc, error) + WriteRequest(context.Context) (ReleaseFunc, error) + SetParentID(string) + SetMetrics(Metrics) + Close() +} + +type scheduler interface { + RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error) + Close() +} + +func NewLimiter(c LimiterConfig) (Limiter, error) { + if err := c.Validate(); err != nil { + return nil, err + } + readScheduler, err := createScheduler(c.Read) + if err != nil { + return nil, fmt.Errorf("create read scheduler: %w", err) + } + writeScheduler, err := createScheduler(c.Write) + if err != nil { + return nil, fmt.Errorf("create write scheduler: %w", err) + } + l := &mClockLimiter{ + readScheduler: readScheduler, + writeScheduler: writeScheduler, + closeCh: make(chan struct{}), + wg: &sync.WaitGroup{}, + readStats: createStats(), + writeStats: createStats(), + } + l.shardID.Store(&shardID{}) + l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}}) + l.startMetricsCollect() + return l, nil +} + +func createScheduler(config OpConfig) (scheduler, error) { + if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit { + return 
newSemaphoreScheduler(config.MaxRunningOps), nil
+	}
+	return scheduling.NewMClock(
+		uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
+		convertToSchedulingTags(config.Tags), config.IdleTimeout)
+}
+
+func convertToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo {
+	result := make(map[string]scheduling.TagInfo)
+	for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
+		result[tag.String()] = scheduling.TagInfo{
+			Share: defaultShare,
+		}
+	}
+	for _, l := range limits {
+		v := result[l.Tag]
+		if l.Weight != nil && *l.Weight != 0 {
+			v.Share = *l.Weight
+		}
+		if l.LimitOps != nil && *l.LimitOps != 0 {
+			v.LimitIOPS = l.LimitOps
+		}
+		if l.ReservedOps != nil && *l.ReservedOps != 0 {
+			v.ReservedIOPS = l.ReservedOps
+		}
+		v.Prohibited = l.Prohibited
+		result[l.Tag] = v
+	}
+	return result
+}
+
+var (
+	_ Limiter = (*noopLimiter)(nil)
+	releaseStub ReleaseFunc = func() {}
+	noopLimiterInstance = &noopLimiter{}
+)
+
+func NewNoopLimiter() Limiter {
+	return noopLimiterInstance
+}
+
+type noopLimiter struct{}
+
+func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
+	return releaseStub, nil
+}
+
+func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
+	return releaseStub, nil
+}
+
+func (n *noopLimiter) SetParentID(string) {}
+
+func (n *noopLimiter) Close() {}
+
+func (n *noopLimiter) SetMetrics(Metrics) {}
+
+var _ Limiter = (*mClockLimiter)(nil)
+
+type shardID struct {
+	id string
+}
+
+type mClockLimiter struct {
+	readScheduler  scheduler
+	writeScheduler scheduler
+
+	readStats  map[string]*stat
+	writeStats map[string]*stat
+
+	shardID atomic.Pointer[shardID]
+	metrics atomic.Pointer[metricsHolder]
+	closeCh chan struct{}
+	wg      *sync.WaitGroup
+}
+
+func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
+	return requestArrival(ctx, n.readScheduler, n.readStats)
+}
+
+func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
+	return requestArrival(ctx, n.writeScheduler, n.writeStats)
+}
+
+func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+	tag, ok := tagging.IOTagFromContext(ctx)
+	if !ok {
+		tag = IOTagClient.String()
+	}
+	stat := getStat(tag, stats)
+	stat.pending.Add(1)
+	if tag == IOTagCritical.String() {
+		stat.inProgress.Add(1)
+		return func() {
+			stat.completed.Add(1)
+		}, nil
+	}
+	rel, err := s.RequestArrival(ctx, tag)
+	stat.inProgress.Add(1)
+	if err != nil {
+		if isResourceExhaustedErr(err) {
+			stat.resourceExhausted.Add(1)
+			return nil, &apistatus.ResourceExhausted{}
+		}
+		stat.completed.Add(1)
+		return nil, err
+	}
+	return func() {
+		rel()
+		stat.completed.Add(1)
+	}, nil
+}
+
+func (n *mClockLimiter) Close() {
+	n.readScheduler.Close()
+	n.writeScheduler.Close()
+	close(n.closeCh)
+	n.wg.Wait()
+	n.metrics.Load().metrics.Close(n.shardID.Load().id)
+}
+
+func (n *mClockLimiter) SetParentID(parentID string) {
+	n.shardID.Store(&shardID{id: parentID})
+}
+
+func (n *mClockLimiter) SetMetrics(m Metrics) {
+	n.metrics.Store(&metricsHolder{metrics: m})
+}
+
+func (n *mClockLimiter) startMetricsCollect() {
+	n.wg.Add(1)
+	go func() {
+		defer n.wg.Done()
+
+		ticker := time.NewTicker(defaultMetricsCollectTimeout)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-n.closeCh:
+				return
+			case <-ticker.C:
+				shardID := n.shardID.Load().id
+				if shardID == "" {
+					continue
+				}
+				metrics :=
n.metrics.Load().metrics + exportMetrics(metrics, n.readStats, shardID, "read") + exportMetrics(metrics, n.writeStats, shardID, "write") + } + } + }() +} + +func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) { + var pending uint64 + var inProgress uint64 + var completed uint64 + var resExh uint64 + for tag, s := range stats { + pending = s.pending.Load() + inProgress = s.inProgress.Load() + completed = s.completed.Load() + resExh = s.resourceExhausted.Load() + if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 { + continue + } + metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh) + } +} + +func isResourceExhaustedErr(err error) bool { + return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || + errors.Is(err, errSemaphoreLimitExceeded) || + errors.Is(err, scheduling.ErrTagRequestsProhibited) +} diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go new file mode 100644 index 000000000..c00da51b7 --- /dev/null +++ b/internal/qos/metrics.go @@ -0,0 +1,31 @@ +package qos + +import "sync/atomic" + +type Metrics interface { + SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) + Close(shardID string) +} + +var _ Metrics = (*noopMetrics)(nil) + +type noopMetrics struct{} + +func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) { +} + +func (n *noopMetrics) Close(string) {} + +// stat presents limiter statistics cumulative counters. +// +// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`. +type stat struct { + completed atomic.Uint64 + pending atomic.Uint64 + resourceExhausted atomic.Uint64 + inProgress atomic.Uint64 +} + +type metricsHolder struct { + metrics Metrics +} diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go new file mode 100644 index 000000000..74e6928f3 --- /dev/null +++ b/internal/qos/semaphore.go @@ -0,0 +1,39 @@ +package qos + +import ( + "context" + "errors" + + qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" +) + +var ( + _ scheduler = (*semaphore)(nil) + errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded") +) + +type semaphore struct { + s *qosSemaphore.Semaphore +} + +func newSemaphoreScheduler(size int64) *semaphore { + return &semaphore{ + s: qosSemaphore.NewSemaphore(size), + } +} + +func (s *semaphore) Close() {} + +func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if s.s.Acquire() { + return s.s.Release, nil + } + return nil, errSemaphoreLimitExceeded +} diff --git a/internal/qos/stats.go b/internal/qos/stats.go new file mode 100644 index 000000000..3ecfad9f9 --- /dev/null +++ b/internal/qos/stats.go @@ -0,0 +1,29 @@ +package qos + +const unknownStatsTag = "unknown" + +var statTags = map[string]struct{}{ + IOTagBackground.String(): {}, + IOTagClient.String(): {}, + IOTagCritical.String(): {}, + IOTagInternal.String(): {}, + IOTagPolicer.String(): {}, + IOTagTreeSync.String(): {}, + IOTagWritecache.String(): {}, + unknownStatsTag: {}, +} + +func createStats() map[string]*stat { + result := make(map[string]*stat) + for tag := range statTags { + result[tag] = &stat{} + } + return result +} + +func getStat(tag string, stats map[string]*stat) 
*stat { + if v, ok := stats[tag]; ok { + return v + } + return stats[unknownStatsTag] +} diff --git a/internal/qos/tags.go b/internal/qos/tags.go new file mode 100644 index 000000000..e3f7cafd6 --- /dev/null +++ b/internal/qos/tags.go @@ -0,0 +1,59 @@ +package qos + +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" +) + +type IOTag string + +const ( + IOTagBackground IOTag = "background" + IOTagClient IOTag = "client" + IOTagCritical IOTag = "critical" + IOTagInternal IOTag = "internal" + IOTagPolicer IOTag = "policer" + IOTagTreeSync IOTag = "treesync" + IOTagWritecache IOTag = "writecache" + + ioTagUnknown IOTag = "" +) + +func FromRawString(s string) (IOTag, error) { + switch s { + case string(IOTagBackground): + return IOTagBackground, nil + case string(IOTagClient): + return IOTagClient, nil + case string(IOTagCritical): + return IOTagCritical, nil + case string(IOTagInternal): + return IOTagInternal, nil + case string(IOTagPolicer): + return IOTagPolicer, nil + case string(IOTagTreeSync): + return IOTagTreeSync, nil + case string(IOTagWritecache): + return IOTagWritecache, nil + default: + return ioTagUnknown, fmt.Errorf("unknown tag %s", s) + } +} + +func (t IOTag) String() string { + return string(t) +} + +func IOTagFromContext(ctx context.Context) string { + tag, ok := tagging.IOTagFromContext(ctx) + if !ok { + tag = "undefined" + } + return tag +} + +func (t IOTag) IsLocal() bool { + return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync +} diff --git a/internal/qos/validate.go b/internal/qos/validate.go new file mode 100644 index 000000000..70f1f24e8 --- /dev/null +++ b/internal/qos/validate.go @@ -0,0 +1,91 @@ +package qos + +import ( + "errors" + "fmt" + "math" +) + +var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") + +type tagConfig struct { + Shares, Limit, Reserved *float64 +} + +func (c *LimiterConfig) Validate() error { + if err := validateOpConfig(c.Read); err != nil { + return fmt.Errorf("limits 'read' section validation error: %w", err) + } + if err := validateOpConfig(c.Write); err != nil { + return fmt.Errorf("limits 'write' section validation error: %w", err) + } + return nil +} + +func validateOpConfig(c OpConfig) error { + if c.MaxRunningOps <= 0 { + return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) + } + if c.MaxWaitingOps <= 0 { + return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps) + } + if c.IdleTimeout <= 0 { + return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String()) + } + if err := validateTags(c.Tags); err != nil { + return fmt.Errorf("'tags' config section validation error: %w", err) + } + return nil +} + +func validateTags(configTags []IOTagConfig) error { + tags := map[IOTag]tagConfig{ + IOTagBackground: {}, + IOTagClient: {}, + IOTagInternal: {}, + IOTagPolicer: {}, + IOTagTreeSync: {}, + IOTagWritecache: {}, + } + for _, t := range configTags { + tag, err := FromRawString(t.Tag) + if err != nil { + return fmt.Errorf("invalid tag %s: %w", t.Tag, err) + } + if _, ok := tags[tag]; !ok { + return fmt.Errorf("tag %s is not configurable", t.Tag) + } + tags[tag] = tagConfig{ + Shares: t.Weight, + Limit: t.LimitOps, + Reserved: t.ReservedOps, + } + } + idx := 0 + var shares float64 + for t, v := range tags { + if idx == 0 { + idx++ + shares = float64Value(v.Shares) + } else if 
(shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) { + return errWeightsMustBeSpecified + } + if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) { + return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String()) + } + if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) { + return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String()) + } + if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) { + return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String()) + } + } + return nil +} + +func float64Value(f *float64) float64 { + if f == nil { + return 0.0 + } + return *f +} diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go index e547701fb..590b7a885 100644 --- a/pkg/ape/chainbase/option.go +++ b/pkg/ape/chainbase/option.go @@ -5,9 +5,7 @@ import ( "os" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.etcd.io/bbolt" - "go.uber.org/zap" ) type Option func(*cfg) @@ -18,7 +16,6 @@ type cfg struct { noSync bool maxBatchDelay time.Duration maxBatchSize int - log *logger.Logger } func defaultCfg() *cfg { @@ -26,7 +23,6 @@ func defaultCfg() *cfg { perm: os.ModePerm, maxBatchDelay: bbolt.DefaultMaxBatchDelay, maxBatchSize: bbolt.DefaultMaxBatchSize, - log: &logger.Logger{Logger: zap.L()}, } } @@ -59,9 +55,3 @@ func WithMaxBatchSize(maxBatchSize int) Option { c.maxBatchSize = maxBatchSize } } - -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = l - } -} diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go index 953b91a79..8cbb1cce9 100644 --- a/pkg/ape/contract_storage/proxy.go +++ b/pkg/ape/contract_storage/proxy.go @@ -31,9 +31,7 @@ type RPCActorProvider interface { type ProxyVerificationContractStorage struct { rpcActorProvider RPCActorProvider - acc *wallet.Account - - proxyScriptHash util.Uint160 + cosigners []actor.SignerAccount policyScriptHash util.Uint160 } @@ -41,12 +39,27 @@ type ProxyVerificationContractStorage struct { var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil) func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage { + acc := wallet.NewAccountFromPrivateKey(key) return &ProxyVerificationContractStorage{ rpcActorProvider: rpcActorProvider, - acc: wallet.NewAccountFromPrivateKey(key), - - proxyScriptHash: proxyScriptHash, + cosigners: []actor.SignerAccount{ + { + Signer: transaction.Signer{ + Account: proxyScriptHash, + Scopes: transaction.CustomContracts, + AllowedContracts: []util.Uint160{policyScriptHash}, + }, + Account: notary.FakeContractAccount(proxyScriptHash), + }, + { + Signer: transaction.Signer{ + Account: acc.Contract.ScriptHash(), + Scopes: transaction.CalledByEntry, + }, + Account: acc, + }, + }, policyScriptHash: policyScriptHash, } @@ -64,7 +77,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke { func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) { rpcActor := contractStorage.rpcActorProvider.GetRPCActor() - act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash)) + act, err := actor.New(rpcActor, contractStorage.cosigners) if err != nil { return nil, err } @@ -98,31 +111,16 @@ func 
(contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na // ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as consigners. func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) { - // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but - // ProxyVerificationContractStorage does not manage reconnections. - contractStorageActor, err := contractStorage.newContractStorageActor() - if err != nil { - return nil, err - } - return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) + rpcActor := contractStorage.rpcActorProvider.GetRPCActor() + inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor} + return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) } -func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount { - return []actor.SignerAccount{ - { - Signer: transaction.Signer{ - Account: proxyScriptHash, - Scopes: transaction.CustomContracts, - AllowedContracts: []util.Uint160{policyScriptHash}, - }, - Account: notary.FakeContractAccount(proxyScriptHash), - }, - { - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.CalledByEntry, - }, - Account: acc, - }, - } +type invokerAdapter struct { + *invoker.Invoker + rpcInvoker invoker.RPCInvoke +} + +func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { + return n.rpcInvoker } diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go index c0413678d..d32bd4a07 100644 --- a/pkg/ape/request/frostfsid.go +++ b/pkg/ape/request/frostfsid.go @@ -1,6 +1,7 @@ package request import ( + "context" "fmt" "strconv" "strings" @@ -12,9 +13,9 @@ import ( ) // FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID. -func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { +func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { reqProps := make(map[string]string) - subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) + subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) @@ -36,8 +37,8 @@ func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvide } // Groups return the actor's group ids from frostfsid contract. 
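+//
+// Callers now propagate the request context through to the contract client,
+// e.g. (illustrative):
+//
+//	groups, err := request.Groups(ctx, frostFSIDClient, pk)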
-func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { - subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) +func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { + subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go index 8c92901f2..98bdf99e7 100644 --- a/pkg/core/client/client.go +++ b/pkg/core/client/client.go @@ -3,15 +3,14 @@ package client import ( "context" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" ) // Client is an interface of FrostFS storage // node's client. type Client interface { - ContainerAnnounceUsedSpace(context.Context, client.PrmAnnounceSpace) (*client.ResAnnounceSpace, error) ObjectPutInit(context.Context, client.PrmObjectPutInit) (client.ObjectWriter, error) ObjectPutSingle(context.Context, client.PrmObjectPutSingle) (*client.ResObjectPutSingle, error) ObjectDelete(context.Context, client.PrmObjectDelete) (*client.ResObjectDelete, error) diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go index d4bc0cf68..91ee5c6c3 100644 --- a/pkg/core/client/util.go +++ b/pkg/core/client/util.go @@ -3,6 +3,7 @@ package client import ( "bytes" "fmt" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -19,7 +20,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro // Args must not be nil. 
func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface { PublicKey() []byte - IterateAddresses(func(string) bool) + Addresses() iter.Seq[string] NumberOfAddresses() int ExternalAddresses() []string }, diff --git a/pkg/core/container/delete.go b/pkg/core/container/delete.go index 8e0aaebb9..8c14bdf5e 100644 --- a/pkg/core/container/delete.go +++ b/pkg/core/container/delete.go @@ -1,7 +1,7 @@ package container import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" ) diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go new file mode 100644 index 000000000..1c52d93e7 --- /dev/null +++ b/pkg/core/container/info.go @@ -0,0 +1,104 @@ +package container + +import ( + "context" + "sync" + + utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" +) + +type Info struct { + Indexed bool + Removed bool +} + +type infoValue struct { + info Info + err error +} + +type InfoProvider interface { + Info(ctx context.Context, id cid.ID) (Info, error) +} + +type infoProvider struct { + mtx *sync.RWMutex + cache map[cid.ID]infoValue + kl *utilSync.KeyLocker[cid.ID] + + source Source + sourceErr error + sourceOnce *sync.Once + sourceFactory func() (Source, error) +} + +func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider { + return &infoProvider{ + mtx: &sync.RWMutex{}, + cache: make(map[cid.ID]infoValue), + sourceOnce: &sync.Once{}, + kl: utilSync.NewKeyLocker[cid.ID](), + sourceFactory: sourceFactory, + } +} + +func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) { + v, found := r.tryGetFromCache(id) + if found { + return v.info, v.err + } + + return r.getFromSource(ctx, id) +} + +func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + value, found := r.cache[id] + return value, found +} + +func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) { + r.kl.Lock(id) + defer r.kl.Unlock(id) + + if v, ok := r.tryGetFromCache(id); ok { + return v.info, v.err + } + + r.sourceOnce.Do(func() { + r.source, r.sourceErr = r.sourceFactory() + }) + if r.sourceErr != nil { + return Info{}, r.sourceErr + } + + cnr, err := r.source.Get(ctx, id) + var civ infoValue + if err != nil { + if client.IsErrContainerNotFound(err) { + removed, err := WasRemoved(ctx, r.source, id) + if err != nil { + civ.err = err + } else { + civ.info.Removed = removed + } + } else { + civ.err = err + } + } else { + civ.info.Indexed = IsIndexedContainer(cnr.Value) + } + r.putToCache(id, civ) + return civ.info, civ.err +} + +func (r *infoProvider) putToCache(id cid.ID, ct infoValue) { + r.mtx.Lock() + defer r.mtx.Unlock() + + r.cache[id] = ct +} diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go index 69854f495..4eb14e53c 100644 --- a/pkg/core/container/storage.go +++ b/pkg/core/container/storage.go @@ -1,6 +1,8 @@ package container import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -41,9 +43,9 @@ type Source interface { // // Implementations must not retain the container pointer and modify // the container 
through it. - Get(cid.ID) (*Container, error) + Get(ctx context.Context, cid cid.ID) (*Container, error) - DeletionInfo(cid.ID) (*DelInfo, error) + DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error) } // EACL groups information about the FrostFS container's extended ACL stored in @@ -58,16 +60,3 @@ type EACL struct { // Session within which Value was set. Nil means session absence. Session *session.Container } - -// EACLSource is the interface that wraps -// basic methods of extended ACL table source. -type EACLSource interface { - // GetEACL reads the table from the source by identifier. - // It returns any error encountered. - // - // GetEACL must return exactly one non-nil value. - // - // Must return apistatus.ErrEACLNotFound if requested - // eACL table is not in source. - GetEACL(cid.ID) (*EACL, error) -} diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index 98919284e..61c568052 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -1,16 +1,18 @@ package container import ( + "context" "errors" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" ) // WasRemoved checks whether the container ever existed or // it just has not been created yet at the current epoch. -func WasRemoved(s Source, cid cid.ID) (bool, error) { - _, err := s.DeletionInfo(cid) +func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { + _, err := s.DeletionInfo(ctx, cid) if err == nil { return true, nil } @@ -20,3 +22,14 @@ func WasRemoved(s Source, cid cid.ID) (bool, error) { } return false, err } + +// IsIndexedContainer returns True if container attributes should be indexed. +func IsIndexedContainer(cnr containerSDK.Container) bool { + var isS3Container bool + for key := range cnr.Attributes() { + if key == ".s3-location-constraint" { + isS3Container = true + } + } + return !isS3Container +} diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go index ecfd0eb15..e752043d3 100644 --- a/pkg/core/frostfsid/subject_provider.go +++ b/pkg/core/frostfsid/subject_provider.go @@ -1,6 +1,8 @@ package frostfsid import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -11,6 +13,6 @@ const ( // SubjectProvider interface provides methods to get subject from FrostfsID contract. type SubjectProvider interface { - GetSubject(util.Uint160) (*client.Subject, error) - GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error) + GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) + GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) } diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go index 29cb2dc94..0c64bb798 100644 --- a/pkg/core/netmap/keys.go +++ b/pkg/core/netmap/keys.go @@ -2,6 +2,6 @@ package netmap // AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes. type AnnouncedKeys interface { - // Checks if the key was announced by a local node. + // IsLocalKey checks if the key was announced by a local node. 
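
The container Source interface above now threads context.Context through Get and DeletionInfo, making chain lookups cancellable and traceable; WasRemoved in util.go forwards its ctx the same way. Below is a minimal, self-contained sketch of that shape, with toy types (cnrID, cnr, delInfo, mapSource) standing in for the SDK's cid.ID, Container, and DelInfo — none of this is the repository's code.

package main

import (
	"context"
	"errors"
	"fmt"
)

// Toy stand-ins for the SDK's cid.ID, Container and DelInfo.
type (
	cnrID   string
	cnr     struct{ name string }
	delInfo struct{ epoch uint64 }
)

// source mirrors the updated core/container Source: every read takes a context.
type source interface {
	Get(ctx context.Context, id cnrID) (*cnr, error)
	DeletionInfo(ctx context.Context, id cnrID) (*delInfo, error)
}

type mapSource struct {
	containers map[cnrID]*cnr
	deleted    map[cnrID]*delInfo
}

func (s *mapSource) Get(ctx context.Context, id cnrID) (*cnr, error) {
	if err := ctx.Err(); err != nil { // honor cancellation before any work
		return nil, err
	}
	if c, ok := s.containers[id]; ok {
		return c, nil
	}
	return nil, errors.New("container not found")
}

func (s *mapSource) DeletionInfo(ctx context.Context, id cnrID) (*delInfo, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	if d, ok := s.deleted[id]; ok {
		return d, nil
	}
	return nil, errors.New("no deletion info")
}

func main() {
	var s source = &mapSource{containers: map[cnrID]*cnr{"c1": {name: "photos"}}}
	c, err := s.Get(context.Background(), "c1")
	fmt.Println(c, err)
}
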
IsLocalKey(key []byte) bool } diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go index b0c9e1f9e..e58e42634 100644 --- a/pkg/core/netmap/nodes.go +++ b/pkg/core/netmap/nodes.go @@ -1,6 +1,10 @@ package netmap -import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +import ( + "iter" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) // Node is a named type of netmap.NodeInfo which provides interface needed // in the current repository. Node is expected to be used everywhere instead @@ -14,10 +18,20 @@ func (x Node) PublicKey() []byte { return (netmap.NodeInfo)(x).PublicKey() } +// Addresses returns an iterator over all announced network addresses. +func (x Node) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() +} + // IterateAddresses iterates over all announced network addresses // and passes them into f. Handler MUST NOT be nil. +// Deprecated: use [Node.Addresses] instead. func (x Node) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) + for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { + if f(s) { + return + } + } } // NumberOfAddresses returns number of announced network addresses. diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go index 7770c61c7..97313da84 100644 --- a/pkg/core/netmap/storage.go +++ b/pkg/core/netmap/storage.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -16,7 +18,7 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMap(diff uint64) (*netmap.NetMap, error) + GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) // GetNetMapByEpoch reads network map by the epoch number from the storage. // It returns the pointer to the requested network map and any error encountered. @@ -25,21 +27,21 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) + GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) // Epoch reads the current epoch from the storage. // It returns thw number of the current epoch and any error encountered. // // Must return exactly one non-default value. - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } // GetLatestNetworkMap requests and returns the latest network map from the storage. -func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) { - return src.GetNetMap(0) +func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { + return src.GetNetMap(ctx, 0) } // GetPreviousNetworkMap requests and returns previous from the latest network map from the storage. 
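
Node.Addresses above returns a Go 1.23 iter.Seq[string] built on the SDK's NetworkEndpoints, while IterateAddresses survives only as a deprecated shim. A sketch of the same migration pattern, assuming the convention that a handler returning true stops iteration; node and its endpoints slice are illustrative, not the repository's types.

package main

import (
	"fmt"
	"iter"
	"slices"
)

type node struct{ endpoints []string }

// Addresses exposes announced endpoints as a range-over-func iterator.
func (n node) Addresses() iter.Seq[string] {
	return slices.Values(n.endpoints)
}

// IterateAddresses keeps the legacy callback form as a shim over Addresses;
// f returning true stops the iteration.
func (n node) IterateAddresses(f func(string) bool) {
	for s := range n.Addresses() {
		if f(s) {
			return
		}
	}
}

func main() {
	n := node{endpoints: []string{"grpc://10.0.0.1:8080", "grpc://10.0.0.2:8080"}}
	for addr := range n.Addresses() {
		fmt.Println(addr)
	}
	n.IterateAddresses(func(string) bool { return true }) // visits only the first endpoint
}
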
-func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) { - return src.GetNetMap(1) +func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { + return src.GetNetMap(ctx, 1) } diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 96f721806..cf090eb37 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -8,11 +8,11 @@ import ( "fmt" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u } if !unprepared { - if err := v.validateSignatureKey(obj); err != nil { + if err := v.validateSignatureKey(ctx, obj); err != nil { return fmt.Errorf("(%T) could not validate signature key: %w", v, err) } @@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u return nil } -func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { +func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error { sig := obj.Signature() if sig == nil { return errMissingSignature @@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { ownerID := obj.OwnerID() if token == nil && obj.ECHeader() != nil { - role, err := v.isIROrContainerNode(obj, binKey) + role, err := v.isIROrContainerNode(ctx, obj, binKey) if err != nil { return err } @@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { } if v.verifyTokenIssuer { - role, err := v.isIROrContainerNode(obj, binKey) + role, err := v.isIROrContainerNode(ctx, obj, binKey) if err != nil { return err } @@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { return nil } -func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) { +func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) { cnrID, containerIDSet := obj.ContainerID() if !containerIDSet { return acl.RoleOthers, errNilCID @@ -199,12 +199,12 @@ func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey [ cnrIDBin := make([]byte, sha256.Size) cnrID.Encode(cnrIDBin) - cnr, err := v.containers.Get(cnrID) + cnr, err := v.containers.Get(ctx, cnrID) if err != nil { return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err) } - res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value) + res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value) if err != nil { return acl.RoleOthers, err } @@ -361,7 +361,7 @@ func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Obj func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) { for _, a := range obj.Attributes() { - if a.Key() != 
objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS { + if a.Key() != objectV2.SysAttributeExpEpoch { continue } diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index 77afbfc45..dc336eb34 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -7,9 +7,10 @@ import ( "strconv" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" @@ -65,7 +66,7 @@ func TestFormatValidator_Validate(t *testing.T) { epoch: curEpoch, }), WithLockSource(ls), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) ownerKey, err := keys.NewPrivateKey() @@ -290,7 +291,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }), WithLockSource(ls), WithVerifySessionTokenIssuer(false), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) tok := sessiontest.Object() @@ -339,7 +340,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) tok := sessiontest.Object() @@ -410,14 +411,14 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) require.NoError(t, v.Validate(context.Background(), obj, false)) @@ -483,15 +484,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) require.NoError(t, v.Validate(context.Background(), obj, false)) @@ -559,15 +560,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) require.Error(t, v.Validate(context.Background(), obj, false)) @@ -578,7 +579,7 @@ type testIRSource struct { irNodes [][]byte } -func (s *testIRSource) InnerRingKeys() ([][]byte, error) { +func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) { return s.irNodes, nil } @@ -586,36 +587,13 @@ 
type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } - -type testNetmapSource struct { - netmaps map[uint64]*netmap.NetMap - currentEpoch uint64 -} - -func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) { - if diff >= s.currentEpoch { - return nil, fmt.Errorf("invalid diff") - } - return s.GetNetMapByEpoch(s.currentEpoch - diff) -} - -func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.netmaps[epoch]; found { - return nm, nil - } - return nil, fmt.Errorf("netmap not found") -} - -func (s *testNetmapSource) Epoch() (uint64, error) { - return s.currentEpoch, nil -} diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go index 67c9a3188..aab12ebf9 100644 --- a/pkg/core/object/info.go +++ b/pkg/core/object/info.go @@ -13,6 +13,13 @@ type ECInfo struct { Total uint32 } +func (v *ECInfo) String() string { + if v == nil { + return "" + } + return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total) +} + // Info groups object address with its FrostFS // object info. type Info struct { @@ -23,5 +30,5 @@ type Info struct { } func (v Info) String() string { - return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject) + return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo) } diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index 13d0ebfb1..3733ed507 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -2,6 +2,7 @@ package object import ( "bytes" + "context" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -17,7 +18,7 @@ import ( ) type InnerRing interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type SenderClassifier struct { @@ -40,6 +41,7 @@ type ClassifyResult struct { } func (c SenderClassifier) Classify( + ctx context.Context, ownerID *user.ID, ownerKey *keys.PublicKey, idCnr cid.ID, @@ -57,15 +59,15 @@ func (c SenderClassifier) Classify( }, nil } - return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr) + return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr) } -func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { - isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) +func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { + isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes) if err != nil { // do not throw error, try best case matching - c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing, - zap.String("error", err.Error())) + c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, + zap.Error(err)) } else if isInnerRingNode { return &ClassifyResult{ Role: acl.RoleInnerRing, 
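
The logging calls in this file switch from zap.String("error", err.Error()) to zap.Error(err), alongside the ctx argument that the repository's logger wrapper now takes. A small standalone comparison with plain zap (the ctx parameter is omitted here): zap.Error emits the conventional "error" key, is a no-op for a nil error, and hands the error value itself to structured encoders.

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	err := errors.New("placement is not possible for previous epoch")

	// Before: a hand-rolled string field; err.Error() would panic on nil err.
	log.Debug("can't check if request is from container node",
		zap.String("error", err.Error()))

	// After: the conventional "error" key, nil-safe (zap.Error(nil) yields a
	// no-op field), and the error value reaches structured encoders intact.
	log.Debug("can't check if request is from container node",
		zap.Error(err))
}
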
@@ -76,13 +78,13 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC binCnr := make([]byte, sha256.Size) idCnr.Encode(binCnr) - isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr) + isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr) if err != nil { // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so // do not throw error, try best case matching - c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode, - zap.String("error", err.Error())) + c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode, + zap.Error(err)) } else if isContainerNode { return &ClassifyResult{ Role: acl.RoleContainer, @@ -97,8 +99,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC }, nil } -func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) { - innerRingKeys, err := c.innerRing.InnerRingKeys() +func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) { + innerRingKeys, err := c.innerRing.InnerRingKeys(ctx) if err != nil { return false, err } @@ -114,10 +116,11 @@ func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) { } func (c SenderClassifier) isContainerKey( + ctx context.Context, owner, idCnr []byte, cnr container.Container, ) (bool, error) { - nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap + nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap if err != nil { return false, err } @@ -131,7 +134,7 @@ func (c SenderClassifier) isContainerKey( // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = core.GetPreviousNetworkMap(c.netmap) + nm, err = core.GetPreviousNetworkMap(ctx, c.netmap) if err != nil { return false, err } diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go index c4de07a5f..dfada764a 100644 --- a/pkg/innerring/bindings.go +++ b/pkg/innerring/bindings.go @@ -8,7 +8,6 @@ type ( // ContractProcessor interface defines functions for binding event producers // such as event.Listener and Timers with contract processor. 
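
One more detail from pkg/core/object/info.go above: the new ECInfo.String checks for a nil receiver, so Info.String can format the EC header unconditionally. The idiom in isolation, with illustrative ecInfo/objInfo types rather than the repository's:

package main

import "fmt"

type ecInfo struct {
	parentID string
	index    uint32
	total    uint32
}

// String tolerates a nil receiver: a typed nil still satisfies fmt.Stringer,
// so formatting an absent EC header yields "" instead of a panic.
func (v *ecInfo) String() string {
	if v == nil {
		return ""
	}
	return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.parentID, v.index, v.total)
}

type objInfo struct {
	address string
	ec      *ecInfo // nil for non-EC objects
}

func (v objInfo) String() string {
	return fmt.Sprintf("address: %s, EC header: %s", v.address, v.ec)
}

func main() {
	fmt.Println(objInfo{address: "addr-1"})                              // EC header is empty
	fmt.Println(objInfo{address: "addr-2", ec: &ecInfo{"parent", 1, 3}}) // full EC header
}
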
ContractProcessor interface { - ListenerNotificationParsers() []event.NotificationParserInfo ListenerNotificationHandlers() []event.NotificationHandlerInfo ListenerNotaryParsers() []event.NotaryParserInfo ListenerNotaryHandlers() []event.NotaryHandlerInfo @@ -16,11 +15,6 @@ type ( ) func connectListenerWithProcessor(l event.Listener, p ContractProcessor) { - // register notification parsers - for _, parser := range p.ListenerNotificationParsers() { - l.SetNotificationParser(parser) - } - // register notification handlers for _, handler := range p.ListenerNotificationHandlers() { l.RegisterNotificationHandler(handler) diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go index ad69f207b..3f9d8df5f 100644 --- a/pkg/innerring/blocktimer.go +++ b/pkg/innerring/blocktimer.go @@ -3,14 +3,10 @@ package innerring import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" ) type ( @@ -19,28 +15,12 @@ type ( EpochDuration() uint64 } - alphaState interface { - IsAlphabet() bool - } - newEpochHandler func() - containerEstimationStopper interface { - StopEstimation(p container.StopEstimationPrm) error - } - epochTimerArgs struct { - l *logger.Logger - - alphabetState alphaState - newEpochHandlers []newEpochHandler - cnrWrapper containerEstimationStopper // to invoke stop container estimation - epoch epochState // to specify which epoch to stop, and epoch duration - - stopEstimationDMul uint32 // X: X/Y of epoch in blocks - stopEstimationDDiv uint32 // Y: X/Y of epoch in blocks + epoch epochState // to specify which epoch to stop, and epoch duration } emitTimerArgs struct { @@ -49,7 +29,7 @@ type ( emitDuration uint32 // in blocks } - depositor func() (util.Uint256, error) + depositor func(context.Context) (util.Uint256, error) awaiter func(context.Context, util.Uint256) error ) @@ -74,7 +54,7 @@ func (s *Server) tickTimers(h uint32) { } func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { - epochTimer := timer.NewBlockTimer( + return timer.NewBlockTimer( func() (uint32, error) { return uint32(args.epoch.EpochDuration()), nil }, @@ -84,42 +64,13 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { } }, ) - - // sub-timer for epoch timer to tick stop container estimation events at - // some block in epoch - epochTimer.OnDelta( - args.stopEstimationDMul, - args.stopEstimationDDiv, - func() { - if !args.alphabetState.IsAlphabet() { - args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations) - return - } - - epochN := args.epoch.EpochCounter() - if epochN == 0 { // estimates are invalid in genesis epoch - return - } - - prm := container.StopEstimationPrm{} - prm.SetEpoch(epochN - 1) - - err := args.cnrWrapper.StopEstimation(prm) - if err != nil { - args.l.Warn(logs.InnerringCantStopEpochEstimation, - zap.Uint64("epoch", epochN), - zap.String("error", err.Error())) - } - }) - - return epochTimer } -func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer { +func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer { return timer.NewBlockTimer( timer.StaticBlockMeter(args.emitDuration), func() { - 
args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{}) + args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{}) }, ) } diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go index 242c0903b..4cbe7e394 100644 --- a/pkg/innerring/blocktimer_test.go +++ b/pkg/innerring/blocktimer_test.go @@ -3,29 +3,20 @@ package innerring import ( "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" "github.com/stretchr/testify/require" ) func TestEpochTimer(t *testing.T) { t.Parallel() - alphaState := &testAlphabetState{isAlphabet: true} neh := &testNewEpochHandler{} - cnrStopper := &testContainerEstStopper{} epochState := &testEpochState{ counter: 99, duration: 10, } args := &epochTimerArgs{ - l: test.NewLogger(t), - alphabetState: alphaState, - newEpochHandlers: []newEpochHandler{neh.Handle}, - cnrWrapper: cnrStopper, - epoch: epochState, - stopEstimationDMul: 2, - stopEstimationDDiv: 10, + newEpochHandlers: []newEpochHandler{neh.Handle}, + epoch: epochState, } et := newEpochTimer(args) err := et.Reset() @@ -33,63 +24,43 @@ func TestEpochTimer(t *testing.T) { et.Tick(100) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 0, cnrStopper.called, "invalid container stop handler calls") et.Tick(101) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(102) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(103) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") var h uint32 for h = 104; h < 109; h++ { et.Tick(h) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") } et.Tick(109) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(110) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(111) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") et.Tick(112) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") et.Tick(113) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") for h = 114; h < 119; h++ { et.Tick(h) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") } et.Tick(120) require.Equal(t, 2, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") -} - -type testAlphabetState struct { - isAlphabet bool -} - -func (s *testAlphabetState) IsAlphabet() bool { - return s.isAlphabet } type testNewEpochHandler struct { @@ -100,15 +71,6 @@ func (h *testNewEpochHandler) Handle() { h.called++ } -type testContainerEstStopper struct { - called int -} - -func (s *testContainerEstStopper) StopEstimation(_ 
container.StopEstimationPrm) error { - s.called++ - return nil -} - type testEpochState struct { counter uint64 duration uint64 diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go index 4a80ebf3b..7deec3f31 100644 --- a/pkg/innerring/fetcher.go +++ b/pkg/innerring/fetcher.go @@ -1,6 +1,8 @@ package innerring import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -47,12 +49,12 @@ type IrFetcherWithoutNotary struct { // InnerRingKeys fetches list of innerring keys from NeoFSAlphabet // role in the sidechain. -func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) { - return fN.cli.NeoFSAlphabetList() +func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { + return fN.cli.NeoFSAlphabetList(ctx) } // InnerRingKeys fetches list of innerring keys from netmap contract // in the sidechain. -func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) { - return f.nm.GetInnerRingList() +func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { + return f.nm.GetInnerRingList(ctx) } diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go index 45135a57b..439400bac 100644 --- a/pkg/innerring/indexer.go +++ b/pkg/innerring/indexer.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "sync" "time" @@ -10,7 +11,7 @@ import ( type ( irFetcher interface { - InnerRingKeys() (keys.PublicKeys, error) + InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) } committeeFetcher interface { @@ -45,7 +46,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK } } -func (s *innerRingIndexer) update() (ind indexes, err error) { +func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) { s.RLock() if time.Since(s.lastAccess) < s.timeout { @@ -62,7 +63,7 @@ func (s *innerRingIndexer) update() (ind indexes, err error) { return s.ind, nil } - innerRing, err := s.irFetcher.InnerRingKeys() + innerRing, err := s.irFetcher.InnerRingKeys(ctx) if err != nil { return indexes{}, err } @@ -81,8 +82,8 @@ func (s *innerRingIndexer) update() (ind indexes, err error) { return s.ind, nil } -func (s *innerRingIndexer) InnerRingIndex() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -90,8 +91,8 @@ func (s *innerRingIndexer) InnerRingIndex() (int32, error) { return ind.innerRingIndex, nil } -func (s *innerRingIndexer) InnerRingSize() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -99,8 +100,8 @@ func (s *innerRingIndexer) InnerRingSize() (int32, error) { return ind.innerRingSize, nil } -func (s *innerRingIndexer) AlphabetIndex() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go index 5bc2cc988..f8201b7df 100644 --- a/pkg/innerring/indexer_test.go +++ b/pkg/innerring/indexer_test.go @@ -1,6 
+1,7 @@ package innerring import ( + "context" "fmt" "sync/atomic" "testing" @@ -37,15 +38,15 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(2), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(3), size, "invalid IR size") }) @@ -56,11 +57,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(0), idx, "invalid IR index") }) @@ -71,11 +72,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") }) @@ -100,30 +101,30 @@ func TestIndexerCachesIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -132,15 +133,15 @@ func TestIndexerCachesIndexes(t *testing.T) { time.Sleep(2 * time.Second) - idx, err = indexer.AlphabetIndex() + idx, err = 
indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -165,15 +166,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") @@ -189,15 +190,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer = newInnerRingIndexer(cf, irf, key, time.Second) - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.ErrorContains(t, err, "test IR error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") } @@ -219,7 +220,7 @@ type testIRFetcher struct { calls atomic.Int32 } -func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { f.calls.Add(1) return f.keys, f.err } @@ -237,7 +238,7 @@ func BenchmarkKeyPosition(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { if keyPosition(key, list) != 5 { b.FailNow() } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 7da0a9794..3d236641e 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -35,31 +35,30 @@ import ( "google.golang.org/grpc" ) -func (s *Server) initNetmapProcessor(cfg *viper.Viper, - cnrClient *container.Client, +func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, alphaSync event.Handler, ) error { - locodeValidator, err := s.newLocodeValidator(cfg) - if err != nil { - return err - } + locodeValidator := s.newLocodeValidator(cfg) netSettings := (*networkSettings)(s.netmapClient) var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator netMapCandidateStateValidator.SetNetworkSettings(netSettings) + poolSize := cfg.GetInt("workers.netmap") + s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) + + var err error 
s.netmapProcessor, err = netmap.New(&netmap.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.netmap"), + PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), EpochTimer: s, EpochState: s, AlphabetState: s, CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"), CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"), - ContainerWrapper: cnrClient, NotaryDepositHandler: s.onlyAlphabetEventHandler( s.notaryHandler, ), @@ -99,7 +98,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain * fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) if err != nil { fromMainChainBlock = 0 - s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err)) } mainnetChain.from = fromMainChainBlock @@ -139,12 +138,12 @@ func (s *Server) enableNotarySupport() error { return nil } -func (s *Server) initNotaryConfig() { +func (s *Server) initNotaryConfig(ctx context.Context) { s.mainNotaryConfig = notaryConfigs( !s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too ) - s.log.Info(logs.InnerringNotarySupport, + s.log.Info(ctx, logs.InnerringNotarySupport, zap.Bool("sidechain_enabled", true), zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled), ) @@ -154,16 +153,15 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli var alphaSync event.Handler if s.withoutMainNet || cfg.GetBool("governance.disable") { - alphaSync = func(event.Event) { - s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled) + alphaSync = func(ctx context.Context, _ event.Event) { + s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled) } } else { // create governance processor governanceProcessor, err := governance.New(&governance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, FrostFSClient: frostfsCli, - NetmapClient: s.netmapClient, AlphabetState: s, EpochState: s, Voter: s, @@ -199,21 +197,16 @@ func (s *Server) createIRFetcher() irFetcher { return irf } -func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients) { +func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) { s.epochTimer = newEpochTimer(&epochTimerArgs{ - l: s.log, - alphabetState: s, - newEpochHandlers: s.newEpochTickHandlers(), - cnrWrapper: morphClients.CnrClient, - epoch: s, - stopEstimationDMul: cfg.GetUint32("timers.stop_estimation.mul"), - stopEstimationDDiv: cfg.GetUint32("timers.stop_estimation.div"), + newEpochHandlers: s.newEpochTickHandlers(ctx), + epoch: s, }) s.addBlockTimer(s.epochTimer) // initialize emission timer - emissionTimer := newEmissionTimer(&emitTimerArgs{ + emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{ ap: s.alphabetProcessor, emitDuration: cfg.GetUint32("timers.emit"), }) @@ -221,18 +214,20 @@ func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients) s.addBlockTimer(emissionTimer) } -func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error { +func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error { parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets")) if err != nil { return err } + poolSize := cfg.GetInt("workers.alphabet") + s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize)) 
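
initNetmapProcessor above and each init*Processor helper below now share one shape: read the worker pool size from config, log it, then pass both the size and a tag-scoped logger (s.log.WithTag(logger.TagProcessor)) to the processor. A sketch of that shape using plain viper and zap; the repository's logger wrapper additionally takes ctx, which is omitted here, and poolSize is an illustrative helper, not the repository's API.

package main

import (
	"github.com/spf13/viper"
	"go.uber.org/zap"
)

// poolSize reads a worker pool size and records it before the processor is
// built, matching the pattern of the init* helpers above.
func poolSize(cfg *viper.Viper, log *zap.Logger, key string) int {
	size := cfg.GetInt(key)
	log.Debug("worker pool configured", zap.String("key", key), zap.Int("size", size))
	return size
}

func main() {
	cfg := viper.New()
	cfg.Set("workers.netmap", 10)
	log, _ := zap.NewDevelopment()
	_ = poolSize(cfg, log, "workers.netmap")
}
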
// create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.alphabet"), + PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, NetmapClient: s.netmapClient, MorphClient: s.morphClient, @@ -247,12 +242,14 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error { return err } -func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error { +func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error { + poolSize := cfg.GetInt("workers.container") + s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.container"), + PoolSize: poolSize, AlphabetState: s, ContainerClient: cnrClient, MorphClient: cnrClient.Morph(), @@ -266,12 +263,14 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C return bindMorphProcessor(containerProcessor, s) } -func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error { +func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error { + poolSize := cfg.GetInt("workers.balance") + s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.balance"), + PoolSize: poolSize, FrostFSClient: frostfsCli, BalanceSC: s.contracts.balance, AlphabetState: s, @@ -284,15 +283,17 @@ func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClien return bindMorphProcessor(balanceProcessor, s) } -func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error { +func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error { if s.withoutMainNet { return nil } + poolSize := cfg.GetInt("workers.frostfs") + s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.frostfs"), + PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, BalanceClient: s.balanceClient, NetmapClient: s.netmapClient, @@ -312,10 +313,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error { return bindMainnetProcessor(frostfsProcessor, s) } -func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { +func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { controlSvcEndpoint := cfg.GetString("control.grpc.endpoint") if controlSvcEndpoint == "" { - s.log.Info(logs.InnerringNoControlServerEndpointSpecified) + s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified) return nil } @@ -341,7 +342,7 @@ func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *ato controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient, 
controlsrv.WithAllowedKeys(authKeys), - ), log, audit) + ), log.WithTag(logger.TagGrpcSvc), audit) grpcControlSrv := grpc.NewServer() control.RegisterControlServiceServer(grpcControlSrv, controlSvc) @@ -377,7 +378,6 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { // form morph container client's options morphCnrOpts := make([]container.Option, 0, 3) morphCnrOpts = append(morphCnrOpts, - container.TryNotary(), container.AsAlphabet(), ) @@ -387,12 +387,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { } s.containerClient = result.CnrClient - s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet()) + s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet()) if err != nil { return nil, err } - s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet()) + s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet()) if err != nil { return nil, err } @@ -411,7 +411,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { return result, nil } -func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error { +func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error { irf := s.createIRFetcher() s.statusIndex = newInnerRingIndexer( @@ -426,27 +426,27 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien return err } - err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync) + err = s.initNetmapProcessor(ctx, cfg, alphaSync) if err != nil { return err } - err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) + err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) if err != nil { return err } - err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient) + err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient) if err != nil { return err } - err = s.initFrostFSMainnetProcessor(cfg) + err = s.initFrostFSMainnetProcessor(ctx, cfg) if err != nil { return err } - err = s.initAlphabetProcessor(cfg) + err = s.initAlphabetProcessor(ctx, cfg) return err } @@ -454,16 +454,17 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) } morphChain := &chainParams{ - log: s.log, + log: s.log.WithTag(logger.TagMorph), cfg: cfg, key: s.key, name: morphPrefix, from: fromSideChainBlock, morphCacheMetric: s.irMetrics.MorphCacheMetrics(), + multinetMetrics: s.irMetrics.Multinet(), } // create morph client @@ -478,7 +479,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- return nil, err } if err := s.morphClient.SetGroupSignerScope(); err != nil { - morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err)) + morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err)) } return morphChain, nil diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 
50a37845b..3a5137261 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" + internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" @@ -103,6 +104,8 @@ type ( // to the application. runners []func(chan<- error) error + // cmode used for upgrade scenario. + // nolint:unused cmode *atomic.Bool } @@ -114,6 +117,7 @@ type ( sgn *transaction.Signer from uint32 // block height morphCacheMetric metrics.MorphCacheMetrics + multinetMetrics metrics.MultinetMetrics } ) @@ -136,10 +140,10 @@ var ( // Start runs all event providers. func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { - s.setHealthStatus(control.HealthStatus_STARTING) + s.setHealthStatus(ctx, control.HealthStatus_STARTING) defer func() { if err == nil { - s.setHealthStatus(control.HealthStatus_READY) + s.setHealthStatus(ctx, control.HealthStatus_READY) } }() @@ -148,12 +152,12 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { return err } - err = s.initConfigFromBlockchain() + err = s.initConfigFromBlockchain(ctx) if err != nil { return err } - if s.IsAlphabet() { + if s.IsAlphabet(ctx) { err = s.initMainNotary(ctx) if err != nil { return err @@ -169,14 +173,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { prm.Validators = s.predefinedValidators // vote for sidechain validator if it is prepared in config - err = s.voteForSidechainValidator(prm) + err = s.voteForSidechainValidator(ctx, prm) if err != nil { // we don't stop inner ring execution on this error - s.log.Warn(logs.InnerringCantVoteForPreparedValidators, - zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators, + zap.Error(err)) } - s.tickInitialExpoch() + s.tickInitialExpoch(ctx) morphErr := make(chan error) mainnnetErr := make(chan error) @@ -213,14 +217,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { } func (s *Server) registerMorphNewBlockEventHandler() { - s.morphListener.RegisterBlockHandler(func(b *block.Block) { - s.log.Debug(logs.InnerringNewBlock, + s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) { + s.log.Debug(ctx, logs.InnerringNewBlock, zap.Uint32("index", b.Index), ) err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(logs.InnerringCantUpdatePersistentState, + s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", b.Index)) } @@ -231,10 +235,10 @@ func (s *Server) registerMorphNewBlockEventHandler() { func (s *Server) registerMainnetNewBlockEventHandler() { if !s.withoutMainNet { - s.mainnetListener.RegisterBlockHandler(func(b *block.Block) { + s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) { err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(logs.InnerringCantUpdatePersistentState, + s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState, zap.String("chain", "main"), zap.Uint32("block_index", b.Index)) } @@ -279,11 +283,11 @@ func (s *Server) initSideNotary(ctx 
context.Context) error { ) } -func (s *Server) tickInitialExpoch() { +func (s *Server) tickInitialExpoch(ctx context.Context) { initialEpochTicker := timer.NewOneTickTimer( timer.StaticBlockMeter(s.initialEpochTickDelta), func() { - s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{}) + s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{}) }) s.addBlockTimer(initialEpochTicker) } @@ -295,16 +299,16 @@ func (s *Server) startWorkers(ctx context.Context) { } // Stop closes all subscription channels. -func (s *Server) Stop() { - s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN) +func (s *Server) Stop(ctx context.Context) { + s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN) go s.morphListener.Stop() go s.mainnetListener.Stop() for _, c := range s.closers { if err := c(); err != nil { - s.log.Warn(logs.InnerringCloserError, - zap.String("error", err.Error()), + s.log.Warn(ctx, logs.InnerringCloserError, + zap.Error(err), ) } } @@ -335,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan ) (*Server, error) { var err error server := &Server{ - log: log, + log: log.WithTag(logger.TagIr), irMetrics: metrics, cmode: cmode, } @@ -345,7 +349,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED) + server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED) // parse notary support server.feeConfig = config.NewFeeConfig(cfg) @@ -372,7 +376,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - server.initNotaryConfig() + server.initNotaryConfig(ctx) err = server.initContracts(cfg) if err != nil { @@ -396,14 +400,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - err = server.initProcessors(cfg, morphClients) + err = server.initProcessors(ctx, cfg, morphClients) if err != nil { return nil, err } - server.initTimers(cfg, morphClients) + server.initTimers(ctx, cfg) - err = server.initGRPCServer(cfg, log, audit) + err = server.initGRPCServer(ctx, cfg, log, audit) if err != nil { return nil, err } @@ -434,7 +438,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev } listener, err := event.NewListener(event.ListenerParams{ - Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))}, + Logger: p.log.With(zap.String("chain", p.name)), Subscriber: sub, }) if err != nil { @@ -484,6 +488,12 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c return nil, fmt.Errorf("%s chain client endpoints not provided", p.name) } + nc := parseMultinetConfig(p.cfg, p.multinetMetrics) + ds, err := internalNet.NewDialerSource(nc) + if err != nil { + return nil, fmt.Errorf("dialer source: %w", err) + } + return client.New( ctx, p.key, @@ -496,6 +506,7 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c }), client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")), client.WithMorphCacheMetrics(p.morphCacheMetric), + client.WithDialerSource(ds), ) } @@ -540,21 +551,43 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) { return extraWallets, nil } -func (s *Server) initConfigFromBlockchain() error { +func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config { + nc := internalNet.Config{ + Enabled: cfg.GetBool("multinet.enabled"), + 
Balancer: cfg.GetString("multinet.balancer"), + Restrict: cfg.GetBool("multinet.restrict"), + FallbackDelay: cfg.GetDuration("multinet.fallback_delay"), + Metrics: m, + } + for i := 0; ; i++ { + mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i)) + if mask == "" { + break + } + sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i)) + nc.Subnets = append(nc.Subnets, internalNet.Subnet{ + Prefix: mask, + SourceIPs: sourceIPs, + }) + } + return nc +} + +func (s *Server) initConfigFromBlockchain(ctx context.Context) error { // get current epoch - epoch, err := s.netmapClient.Epoch() + epoch, err := s.netmapClient.Epoch(ctx) if err != nil { return fmt.Errorf("can't read epoch number: %w", err) } // get current epoch duration - epochDuration, err := s.netmapClient.EpochDuration() + epochDuration, err := s.netmapClient.EpochDuration(ctx) if err != nil { return fmt.Errorf("can't read epoch duration: %w", err) } // get balance precision - balancePrecision, err := s.balanceClient.Decimals() + balancePrecision, err := s.balanceClient.Decimals(ctx) if err != nil { return fmt.Errorf("can't read balance contract precision: %w", err) } @@ -564,14 +597,14 @@ func (s *Server) initConfigFromBlockchain() error { s.precision.SetBalancePrecision(balancePrecision) // get next epoch delta tick - s.initialEpochTickDelta, err = s.nextEpochBlockDelta() + s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx) if err != nil { return err } - s.log.Debug(logs.InnerringReadConfigFromBlockchain, - zap.Bool("active", s.IsActive()), - zap.Bool("alphabet", s.IsAlphabet()), + s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain, + zap.Bool("active", s.IsActive(ctx)), + zap.Bool("alphabet", s.IsAlphabet(ctx)), zap.Uint64("epoch", epoch), zap.Uint32("precision", balancePrecision), zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta), @@ -580,8 +613,8 @@ func (s *Server) initConfigFromBlockchain() error { return nil } -func (s *Server) nextEpochBlockDelta() (uint32, error) { - epochBlock, err := s.netmapClient.LastEpochBlock() +func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) { + epochBlock, err := s.netmapClient.LastEpochBlock(ctx) if err != nil { return 0, fmt.Errorf("can't read last epoch block: %w", err) } @@ -602,17 +635,17 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) { // onlyAlphabet wrapper around event handler that executes it // only if inner ring node is alphabet node. 
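
parseMultinetConfig above gathers subnets from numbered keys, probing multinet.subnets.0, .1, ... until the first index with an empty mask. The probing loop in isolation (subnet and readSubnets are illustrative names):

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

type subnet struct {
	Prefix    string
	SourceIPs []string
}

// readSubnets probes numbered config sections until the first missing mask,
// since the sections are maps rather than a native viper list.
func readSubnets(cfg *viper.Viper) []subnet {
	var out []subnet
	for i := 0; ; i++ {
		mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i))
		if mask == "" {
			break
		}
		out = append(out, subnet{
			Prefix:    mask,
			SourceIPs: cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i)),
		})
	}
	return out
}

func main() {
	cfg := viper.New()
	cfg.Set("multinet.subnets.0.mask", "10.0.0.0/8")
	cfg.Set("multinet.subnets.0.source_ips", []string{"10.0.0.2"})
	fmt.Println(readSubnets(cfg))
}
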
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler { - return func(ev event.Event) { - if s.IsAlphabet() { - f(ev) + return func(ctx context.Context, ev event.Event) { + if s.IsAlphabet(ctx) { + f(ctx, ev) } } } -func (s *Server) newEpochTickHandlers() []newEpochHandler { +func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler { newEpochHandlers := []newEpochHandler{ func() { - s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{}) + s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{}) }, } diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go index a9a9498b6..ae4c85168 100644 --- a/pkg/innerring/locode.go +++ b/pkg/innerring/locode.go @@ -1,15 +1,15 @@ package innerring import ( + "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" + locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" + locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" irlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" - locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb" "github.com/spf13/viper" ) -func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) { +func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator { locodeDB := locodebolt.New(locodebolt.Prm{ Path: cfg.GetString("locode.db.path"), }, @@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, err return irlocode.New(irlocode.Prm{ DB: (*locodeBoltDBWrapper)(locodeDB), - }), nil + }) } type locodeBoltEntryWrapper struct { diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go index 9961710ca..fb11e9426 100644 --- a/pkg/innerring/netmap.go +++ b/pkg/innerring/netmap.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -17,8 +18,8 @@ type networkSettings netmapclient.Client // MaintenanceModeAllowed requests network configuration from the Sidechain // and check allowance of storage node's maintenance mode according to it. // Always returns state.ErrMaintenanceModeDisallowed. 
-func (s *networkSettings) MaintenanceModeAllowed() error { - allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed() +func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error { + allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx) if err != nil { return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err) } else if allowed { diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index c601f5587..c8a69962f 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -28,37 +28,39 @@ const ( gasDivisor = 2 ) -func (s *Server) depositMainNotary() (tx util.Uint256, err error) { +func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) { depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor) if err != nil { return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err) } return s.mainnetClient.DepositNotary( + ctx, depositAmount, uint32(s.epochDuration.Load())+notaryExtraBlocks, ) } -func (s *Server) depositSideNotary() (tx util.Uint256, err error) { +func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) { depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor) if err != nil { return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err) } - return s.morphClient.DepositEndlessNotary(depositAmount) + tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount) + return tx, err } -func (s *Server) notaryHandler(_ event.Event) { +func (s *Server) notaryHandler(ctx context.Context, _ event.Event) { if !s.mainNotaryConfig.disabled { - _, err := s.depositMainNotary() + _, err := s.depositMainNotary(ctx) if err != nil { - s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) + s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) } } - if _, err := s.depositSideNotary(); err != nil { - s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) + if _, err := s.depositSideNotary(ctx); err != nil { + s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) } } @@ -71,7 +73,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er } func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error { - tx, err := deposit() + tx, err := deposit(ctx) if err != nil { return err } @@ -80,17 +82,17 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. 
- s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade) + s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade) return nil } - s.log.Info(msg) + s.log.Info(ctx, msg) return await(ctx, tx) } func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error { - for i := 0; i < notaryDepositTimeout; i++ { + for range notaryDepositTimeout { select { case <-ctx.Done(): return ctx.Err() diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go index 9de075f17..d6b653282 100644 --- a/pkg/innerring/processors/alphabet/handlers.go +++ b/pkg/innerring/processors/alphabet/handlers.go @@ -1,6 +1,8 @@ package alphabet import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" @@ -8,16 +10,16 @@ import ( "go.uber.org/zap" ) -func (ap *Processor) HandleGasEmission(ev event.Event) { +func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) { _ = ev.(timers.NewAlphabetEmitTick) - ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit")) + ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit")) // send event to the worker pool - err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit) + err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) }) if err != nil { // there system can be moved into controlled degradation stage - ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained, + ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained, zap.Int("capacity", ap.pool.Cap())) } } diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index 346901949..1da3c401d 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -1,11 +1,13 @@ package alphabet_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -21,7 +23,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) { var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}} alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } @@ -60,7 +62,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -95,10 +97,10 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { t.Parallel() var emission uint64 = 100_000 var index int = 5 - var parsedWallets []util.Uint160 = []util.Uint160{} + var parsedWallets []util.Uint160 alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i 
<= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } @@ -137,7 +139,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -167,16 +169,16 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { t.Parallel() var emission uint64 = 100_000 var index int = 5 - var parsedWallets []util.Uint160 = []util.Uint160{} + var parsedWallets []util.Uint160 alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } morphClient := &testMorphClient{} - nodes := []netmap.NodeInfo{} + var nodes []netmap.NodeInfo network := &netmap.NetMap{} network.SetNodes(nodes) @@ -198,7 +200,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -219,7 +221,7 @@ type testIndexer struct { index int } -func (i *testIndexer) AlphabetIndex() int { +func (i *testIndexer) AlphabetIndex(context.Context) int { return i.index } @@ -246,7 +248,7 @@ type testMorphClient struct { batchTransferedGas []batchTransferGas } -func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { +func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) { c.invokedMethods = append(c.invokedMethods, invokedMethod{ contract: contract, @@ -254,7 +256,7 @@ func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, metho method: method, args: args, }) - return 0, nil + return client.InvokeRes{}, nil } func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error { @@ -277,6 +279,6 @@ type testNetmapClient struct { netmap *netmap.NetMap } -func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { return c.netmap, nil } diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 2317f3e98..d3d0f83f2 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -1,6 +1,7 @@ package alphabet import ( + "context" "crypto/elliptic" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -13,40 +14,40 @@ import ( const emitMethod = "emit" -func (ap *Processor) processEmit() bool { - index := ap.irList.AlphabetIndex() +func (ap *Processor) processEmit(ctx context.Context) bool { + index := ap.irList.AlphabetIndex(ctx) if index < 0 { - ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) + ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) return true } contract, ok := ap.alphabetContracts.GetByIndex(index) if !ok { - ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, + ap.log.Debug(ctx, 
logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, zap.Int("index", index)) return false } // there is no signature collecting, so we don't need extra fee - _, err := ap.morphClient.Invoke(contract, 0, emitMethod) + _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod) if err != nil { - ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) + ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err)) return false } if ap.storageEmission == 0 { - ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff) + ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff) return true } - networkMap, err := ap.netmapClient.NetMap() + networkMap, err := ap.netmapClient.NetMap(ctx) if err != nil { - ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, - zap.String("error", err.Error())) + ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, + zap.Error(err)) return false } @@ -58,7 +59,7 @@ func (ap *Processor) processEmit() bool { ap.pwLock.RUnlock() extraLen := len(pw) - ap.log.Debug(logs.AlphabetGasEmission, + ap.log.Debug(ctx, logs.AlphabetGasEmission, zap.Int("network_map", nmLen), zap.Int("extra_wallets", extraLen)) @@ -68,37 +69,37 @@ func (ap *Processor) processEmit() bool { gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen)) - ap.transferGasToNetmapNodes(nmNodes, gasPerNode) + ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode) - ap.transferGasToExtraNodes(pw, gasPerNode) + ap.transferGasToExtraNodes(ctx, pw, gasPerNode) return true } -func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) { +func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) { for i := range nmNodes { keyBytes := nmNodes[i].PublicKey() key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { - ap.log.Warn(logs.AlphabetCantParseNodePublicKey, - zap.String("error", err.Error())) + ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey, + zap.Error(err)) continue } err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode) if err != nil { - ap.log.Warn(logs.AlphabetCantTransferGas, + ap.log.Warn(ctx, logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), - zap.String("error", err.Error()), + zap.Error(err), ) } } } -func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) { +func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) { if len(pw) > 0 { err := ap.morphClient.BatchTransferGas(pw, gasPerNode) if err != nil { @@ -106,10 +107,10 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed for i, addr := range pw { receiversLog[i] = addr.StringLE() } - ap.log.Warn(logs.AlphabetCantTransferGasToWallet, + ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index ce6679969..0aea74003 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -1,26 +1,26 @@ package alphabet import ( + "context" "errors" "fmt" "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // Indexer is a callback interface for inner ring global state. Indexer interface { - AlphabetIndex() int + AlphabetIndex(context.Context) int } // Contracts is an interface of the storage @@ -36,11 +36,11 @@ type ( } netmapClient interface { - NetMap() (*netmap.NetMap, error) + NetMap(ctx context.Context) (*netmap.NetMap, error) } morphClient interface { - Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) + Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error } @@ -85,8 +85,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/alphabet: global state is not set") } - p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err) @@ -116,11 +114,6 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) { ap.pwLock.Unlock() } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - return nil -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. 
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go index e39f3abbd..b5d05a02e 100644 --- a/pkg/innerring/processors/balance/handlers.go +++ b/pkg/innerring/processors/balance/handlers.go @@ -1,6 +1,7 @@ package balance import ( + "context" "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -10,20 +11,20 @@ import ( "go.uber.org/zap" ) -func (bp *Processor) handleLock(ev event.Event) { +func (bp *Processor) handleLock(ctx context.Context, ev event.Event) { lock := ev.(balanceEvent.Lock) - bp.log.Info(logs.Notification, + bp.log.Info(ctx, logs.Notification, zap.String("type", "lock"), zap.String("value", hex.EncodeToString(lock.ID()))) // send an event to the worker pool err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool { - return bp.processLock(&lock) + return bp.processLock(ctx, &lock) }) if err != nil { // there system can be moved into controlled degradation stage - bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained, + bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained, zap.Int("capacity", bp.pool.Cap())) } } diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go index 86a9e15d0..0fd23d8ab 100644 --- a/pkg/innerring/processors/balance/handlers_test.go +++ b/pkg/innerring/processors/balance/handlers_test.go @@ -1,6 +1,7 @@ package balance import ( + "context" "testing" "time" @@ -30,7 +31,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) { }) require.NoError(t, err, "failed to create processor") - processor.handleLock(balanceEvent.Lock{}) + processor.handleLock(context.Background(), balanceEvent.Lock{}) for processor.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -56,7 +57,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) { }) require.NoError(t, err, "failed to create processor") - processor.handleLock(balanceEvent.Lock{}) + processor.handleLock(context.Background(), balanceEvent.Lock{}) for processor.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -69,7 +70,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -83,7 +84,7 @@ type testFrostFSContractClient struct { chequeCalls int } -func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error { +func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error { c.chequeCalls++ return nil } diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go index 1d94fa454..60475908c 100644 --- a/pkg/innerring/processors/balance/process_assets.go +++ b/pkg/innerring/processors/balance/process_assets.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" @@ -9,9 +11,9 @@ import ( // Process lock event by invoking Cheque method in main net to send assets // back to the withdraw issuer. 
-func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { - if !bp.alphabetState.IsAlphabet() { - bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock) +func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool { + if !bp.alphabetState.IsAlphabet(ctx) { + bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock) return true } @@ -23,9 +25,9 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { prm.SetLock(lock.LockAccount()) prm.SetHash(lock.TxHash()) - err := bp.frostfsClient.Cheque(prm) + err := bp.frostfsClient.Cheque(ctx, prm) if err != nil { - bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err)) + bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index 5cc849b5c..34203b74f 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -1,10 +1,10 @@ package balance import ( + "context" "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -12,13 +12,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // PrecisionConverter converts balance amount values. @@ -27,7 +26,7 @@ type ( } FrostFSClient interface { - Cheque(p frostfscontract.ChequePrm) error + Cheque(ctx context.Context, p frostfscontract.ChequePrm) error } // Processor of events produced by balance contract in the morphchain. @@ -68,8 +67,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/balance: balance precision converter is not set") } - p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err) @@ -91,32 +88,16 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var parsers []event.NotificationParserInfo - - // new lock event - lock := event.NotificationParserInfo{} - lock.SetType(lockNotification) - lock.SetScriptHash(bp.balanceSC) - lock.SetParser(balanceEvent.ParseLock) - parsers = append(parsers, lock) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var handlers []event.NotificationHandlerInfo - - // lock handler - lock := event.NotificationHandlerInfo{} - lock.SetType(lockNotification) - lock.SetScriptHash(bp.balanceSC) - lock.SetHandler(bp.handleLock) - handlers = append(handlers, lock) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: bp.balanceSC, + Type: lockNotification, + Parser: balanceEvent.ParseLock, + Handlers: []event.Handler{bp.handleLock}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. 
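The balance hunks above complete a pattern that recurs through the rest of this patch: `event.Handler` gains a leading `context.Context` parameter (visible in the `onlyAlphabetEventHandler` rewrite earlier), and the imperative `ListenerNotificationParsers` plus `SetScriptHash`/`SetType`/`SetParser`/`SetHandler` wiring collapses into one declarative `NotificationHandlerInfo` literal with `Contract`, `Type`, `Parser`, and `Handlers` fields. Below is a minimal, self-contained sketch of that shape; the local type definitions (`Event`, `Handler`, `Uint160`, the string-typed `Type` field) are illustrative stand-ins inferred from this diff, not the real `morph/event` package types.

```go
package main

import (
	"context"
	"fmt"
)

// Event and Handler mirror the shapes this patch converges on: every
// handler now receives a context as its first argument.
type Event any

type Handler func(ctx context.Context, ev Event)

type Uint160 [20]byte

// NotificationHandlerInfo bundles what the removed code spread across
// SetScriptHash/SetType/SetParser/SetHandler calls into one literal,
// which is why the separate ListenerNotificationParsers methods could
// be deleted outright.
type NotificationHandlerInfo struct {
	Contract Uint160
	Type     string // event.Type in the real package; string here for brevity
	Parser   func(raw []byte) (Event, error)
	Handlers []Handler
}

type lockEvent struct{ id []byte }

func parseLock(raw []byte) (Event, error) { return lockEvent{id: raw}, nil }

func handleLock(ctx context.Context, ev Event) {
	lock := ev.(lockEvent)
	fmt.Printf("lock %x handled, ctx alive: %v\n", lock.id, ctx.Err() == nil)
}

// listenerNotificationHandlers plays the role of the rewritten balance
// processor: one declarative table per contract.
func listenerNotificationHandlers(balanceSC Uint160) []NotificationHandlerInfo {
	return []NotificationHandlerInfo{
		{
			Contract: balanceSC,
			Type:     "Lock",
			Parser:   parseLock,
			Handlers: []Handler{handleLock},
		},
	}
}

func main() {
	// A toy listener: parse the raw notification, then fan out to the
	// registered handlers with one shared context.
	ctx := context.Background()
	for _, info := range listenerNotificationHandlers(Uint160{0x01}) {
		ev, err := info.Parser([]byte{0xde, 0xad})
		if err != nil {
			continue
		}
		for _, h := range info.Handlers {
			h(ctx, ev)
		}
	}
}
```

A nice property of the declarative form, repeated for the frostfs processor later in this patch, is that the parser and handlers for one notification type can no longer drift apart: they sit in the same struct literal, keyed by the same contract and type.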
diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go index ba12ebb37..5334b9a1f 100644 --- a/pkg/innerring/processors/container/common.go +++ b/pkg/innerring/processors/container/common.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/ecdsa" "errors" "fmt" @@ -45,7 +46,7 @@ type signatureVerificationData struct { // - v.binPublicKey is a public session key // - session context corresponds to the container and verb in v // - session is "alive" -func (cp *Processor) verifySignature(v signatureVerificationData) error { +func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error { var err error var key frostfsecdsa.PublicKeyRFC6979 keyProvided := v.binPublicKey != nil @@ -58,7 +59,7 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error { } if len(v.binTokenSession) > 0 { - return cp.verifyByTokenSession(v, &key, keyProvided) + return cp.verifyByTokenSession(ctx, v, &key, keyProvided) } if keyProvided { @@ -77,8 +78,8 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error { return errors.New("signature is invalid or calculated with the key not bound to the container owner") } -func (cp *Processor) checkTokenLifetime(token session.Container) error { - curEpoch, err := cp.netState.Epoch() +func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error { + curEpoch, err := cp.netState.Epoch(ctx) if err != nil { return fmt.Errorf("could not read current epoch: %w", err) } @@ -90,7 +91,7 @@ func (cp *Processor) checkTokenLifetime(token session.Container) error { return nil } -func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { +func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { var tok session.Container err := tok.Unmarshal(v.binTokenSession) @@ -118,7 +119,7 @@ func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *fros return errors.New("owner differs with token owner") } - err = cp.checkTokenLifetime(tok) + err = cp.checkTokenLifetime(ctx, tok) if err != nil { return fmt.Errorf("check session lifetime: %w", err) } diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index 8bb89abe2..bb038a3cb 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -11,44 +12,40 @@ import ( "go.uber.org/zap" ) -func (cp *Processor) handlePut(ev event.Event) { +func (cp *Processor) handlePut(ctx context.Context, ev event.Event) { put := ev.(putEvent) id := sha256.Sum256(put.Container()) - cp.log.Info(logs.Notification, + cp.log.Info(ctx, logs.Notification, zap.String("type", "container put"), zap.String("id", base58.Encode(id[:]))) // send an event to the worker pool err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool { - return cp.processContainerPut(put) + return cp.processContainerPut(ctx, put) }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, + cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } -func (cp *Processor) 
handleDelete(ev event.Event) { +func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) { del := ev.(containerEvent.Delete) - cp.log.Info(logs.Notification, + cp.log.Info(ctx, logs.Notification, zap.String("type", "container delete"), zap.String("id", base58.Encode(del.ContainerID()))) // send an event to the worker pool err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool { - return cp.processContainerDelete(del) + return cp.processContainerDelete(ctx, del) }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, + cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } - -func (cp *Processor) handleSetEACL(_ event.Event) { - cp.log.Warn(logs.SkipDeprecatedNotification, zap.String("type", "set EACL")) -} diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go index 1aac31ae3..1b3842eb0 100644 --- a/pkg/innerring/processors/container/handlers_test.go +++ b/pkg/innerring/processors/container/handlers_test.go @@ -1,9 +1,9 @@ package container import ( + "context" "crypto/ecdsa" "encoding/hex" - "fmt" "testing" "time" @@ -72,7 +72,7 @@ func TestPutEvent(t *testing.T) { nr: nr, } - proc.handlePut(event) + proc.handlePut(context.Background(), event) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -144,7 +144,7 @@ func TestDeleteEvent(t *testing.T) { Signature: signature, } - proc.handleDelete(ev) + proc.handleDelete(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -161,7 +161,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -170,11 +170,11 @@ type testNetworkState struct { epoch uint64 } -func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) { +func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) { return s.homHashDisabled, nil } -func (s *testNetworkState) Epoch() (uint64, error) { +func (s *testNetworkState) Epoch(context.Context) (uint64, error) { return s.epoch, nil } @@ -187,7 +187,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) { +func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { key := hex.EncodeToString(cid) if cont, found := c.get[key]; found { return cont, nil @@ -237,6 +237,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction) type testFrostFSIDClient struct{} -func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { - return nil, fmt.Errorf("subject not found") +func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { + return &frostfsidclient.Subject{}, nil } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index a950997fd..8e4ab2623 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -1,6 +1,7 @@ package container import ( + "context" "errors" "fmt" "strings" @@ -36,28 +37,28 @@ var errContainerAndOwnerNamespaceDontMatch = 
errors.New("container and owner nam // Process a new container from the user by checking the container sanity // and sending approve tx back to the morph. -func (cp *Processor) processContainerPut(put putEvent) bool { - if !cp.alphabetState.IsAlphabet() { - cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut) +func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool { + if !cp.alphabetState.IsAlphabet(ctx) { + cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut) return true } - ctx := &putContainerContext{ + pctx := &putContainerContext{ e: put, } - err := cp.checkPutContainer(ctx) + err := cp.checkPutContainer(ctx, pctx) if err != nil { - cp.log.Error(logs.ContainerPutContainerCheckFailed, - zap.String("error", err.Error()), + cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, + zap.Error(err), ) return false } - if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(logs.ContainerCouldNotApprovePutContainer, - zap.String("error", err.Error()), + if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil { + cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer, + zap.Error(err), ) return false } @@ -65,8 +66,8 @@ func (cp *Processor) processContainerPut(put putEvent) bool { return true } -func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { - binCnr := ctx.e.Container() +func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error { + binCnr := pctx.e.Container() var cnr containerSDK.Container err := cnr.Unmarshal(binCnr) @@ -74,12 +75,12 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { return fmt.Errorf("invalid binary container: %w", err) } - err = cp.verifySignature(signatureVerificationData{ + err = cp.verifySignature(ctx, signatureVerificationData{ ownerContainer: cnr.Owner(), verb: session.VerbContainerPut, - binTokenSession: ctx.e.SessionToken(), - binPublicKey: ctx.e.PublicKey(), - signature: ctx.e.Signature(), + binTokenSession: pctx.e.SessionToken(), + binPublicKey: pctx.e.PublicKey(), + signature: pctx.e.Signature(), signedData: binCnr, }) if err != nil { @@ -87,13 +88,13 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { } // check homomorphic hashing setting - err = checkHomomorphicHashing(cp.netState, cnr) + err = checkHomomorphicHashing(ctx, cp.netState, cnr) if err != nil { return fmt.Errorf("incorrect homomorphic hashing setting: %w", err) } // check native name and zone - err = cp.checkNNS(ctx, cnr) + err = cp.checkNNS(ctx, pctx, cnr) if err != nil { return fmt.Errorf("NNS: %w", err) } @@ -103,24 +104,24 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { // Process delete container operation from the user by checking container sanity // and sending approve tx back to morph. 
-func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { - if !cp.alphabetState.IsAlphabet() { - cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete) +func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool { + if !cp.alphabetState.IsAlphabet(ctx) { + cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete) return true } - err := cp.checkDeleteContainer(e) + err := cp.checkDeleteContainer(ctx, e) if err != nil { - cp.log.Error(logs.ContainerDeleteContainerCheckFailed, - zap.String("error", err.Error()), + cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, + zap.Error(err), ) return false } if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer, - zap.String("error", err.Error()), + cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer, + zap.Error(err), ) return false @@ -129,7 +130,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { return true } -func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { +func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error { binCnr := e.ContainerID() var idCnr cid.ID @@ -140,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { } // receive owner of the related container - cnr, err := cp.cnrClient.Get(binCnr) + cnr, err := cp.cnrClient.Get(ctx, binCnr) if err != nil { return fmt.Errorf("could not receive the container: %w", err) } - err = cp.verifySignature(signatureVerificationData{ + err = cp.verifySignature(ctx, signatureVerificationData{ ownerContainer: cnr.Value.Owner(), verb: session.VerbContainerDelete, idContainerSet: true, @@ -162,39 +163,39 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { return nil } -func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error { +func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error { // fetch domain info - ctx.d = containerSDK.ReadDomain(cnr) + pctx.d = containerSDK.ReadDomain(cnr) // if PutNamed event => check if values in container correspond to args - if named, ok := ctx.e.(interface { + if named, ok := pctx.e.(interface { Name() string Zone() string }); ok { - if name := named.Name(); name != ctx.d.Name() { - return fmt.Errorf("names differ %s/%s", name, ctx.d.Name()) + if name := named.Name(); name != pctx.d.Name() { + return fmt.Errorf("names differ %s/%s", name, pctx.d.Name()) } - if zone := named.Zone(); zone != ctx.d.Zone() { - return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone()) + if zone := named.Zone(); zone != pctx.d.Zone() { + return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone()) } } - namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns") - if !hasNamespace { - return nil - } - addr, err := util.Uint160DecodeBytesBE(cnr.Owner().WalletBytes()[1 : 1+util.Uint160Size]) if err != nil { return fmt.Errorf("could not get container owner address: %w", err) } - subject, err := cp.frostFSIDClient.GetSubject(addr) + subject, err := cp.frostFSIDClient.GetSubject(ctx, addr) if err != nil { return fmt.Errorf("could not get subject from FrostfsID contract: %w", err) } + namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns") + if !hasNamespace { + return nil + } + if subject.Namespace != namespace { return 
errContainerAndOwnerNamespaceDontMatch } @@ -202,13 +203,13 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain return nil } -func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error { - netSetting, err := ns.HomomorphicHashDisabled() +func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error { + netSetting, err := ns.HomomorphicHashDisabled(ctx) if err != nil { return fmt.Errorf("could not get setting in contract: %w", err) } - if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting { + if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting { return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting) } diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index 8fd9edfb8..9be93baa4 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -1,11 +1,11 @@ package container import ( + "context" "errors" "fmt" frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -15,18 +15,17 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } ContClient interface { ContractAddress() util.Uint160 - Get(cid []byte) (*containercore.Container, error) + Get(ctx context.Context, cid []byte) (*containercore.Container, error) } MorphClient interface { @@ -34,7 +33,7 @@ type ( } FrostFSIDClient interface { - GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) + GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) } // Processor of events produced by container contract in the sidechain. @@ -69,7 +68,7 @@ type NetworkState interface { // // Must return any error encountered // which did not allow reading the value. - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) // HomomorphicHashDisabled must return boolean that // represents homomorphic network state: @@ -77,7 +76,7 @@ type NetworkState interface { // * false if hashing is enabled. // // which did not allow reading the value. - HomomorphicHashDisabled() (bool, error) + HomomorphicHashDisabled(ctx context.Context) (bool, error) } // New creates a container contract processor instance. @@ -97,8 +96,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/container: FrostFSID client is not set") } - p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err) @@ -121,11 +118,6 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. 
-func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - return nil -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil @@ -157,11 +149,6 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { p.SetParser(containerEvent.ParseDeleteNotary) pp = append(pp, p) - // set EACL - p.SetRequestType(containerEvent.SetEACLNotaryEvent) - p.SetParser(containerEvent.ParseSetEACLNotary) - pp = append(pp, p) - return pp } @@ -190,10 +177,5 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { h.SetHandler(cp.handleDelete) hh = append(hh, h) - // set eACL - h.SetRequestType(containerEvent.SetEACLNotaryEvent) - h.SetHandler(cp.handleSetEACL) - hh = append(hh, h) - return hh } diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go index c80f9fdc5..936de2e77 100644 --- a/pkg/innerring/processors/frostfs/handlers.go +++ b/pkg/innerring/processors/frostfs/handlers.go @@ -2,6 +2,7 @@ package frostfs import ( "bytes" + "context" "encoding/hex" "slices" @@ -12,67 +13,67 @@ import ( "go.uber.org/zap" ) -func (np *Processor) handleDeposit(ev event.Event) { +func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) { deposit := ev.(frostfsEvent.Deposit) depositIDBin := bytes.Clone(deposit.ID()) slices.Reverse(depositIDBin) - np.log.Info(logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "deposit"), zap.String("id", hex.EncodeToString(depositIDBin))) // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool { - return np.processDeposit(deposit) + return np.processDeposit(ctx, deposit) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleWithdraw(ev event.Event) { +func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) { withdraw := ev.(frostfsEvent.Withdraw) withdrawBin := bytes.Clone(withdraw.ID()) slices.Reverse(withdrawBin) - np.log.Info(logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "withdraw"), zap.String("id", hex.EncodeToString(withdrawBin))) // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool { - return np.processWithdraw(withdraw) + return np.processWithdraw(ctx, withdraw) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleCheque(ev event.Event) { +func (np *Processor) handleCheque(ctx context.Context, ev event.Event) { cheque := ev.(frostfsEvent.Cheque) - np.log.Info(logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "cheque"), zap.String("id", hex.EncodeToString(cheque.ID()))) // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool { - return np.processCheque(cheque) + return np.processCheque(ctx, cheque) }) if err != nil { // there system can be moved into controlled degradation stage - 
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleConfig(ev event.Event) { +func (np *Processor) handleConfig(ctx context.Context, ev event.Event) { cfg := ev.(frostfsEvent.Config) - np.log.Info(logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "set config"), zap.String("key", hex.EncodeToString(cfg.Key())), zap.String("value", hex.EncodeToString(cfg.Value()))) @@ -80,11 +81,11 @@ func (np *Processor) handleConfig(ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool { - return np.processConfig(cfg) + return np.processConfig(ctx, cfg) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go index 6425172bd..72310f6f9 100644 --- a/pkg/innerring/processors/frostfs/handlers_test.go +++ b/pkg/innerring/processors/frostfs/handlers_test.go @@ -1,6 +1,7 @@ package frostfs import ( + "context" "testing" "time" @@ -36,7 +37,7 @@ func TestHandleDeposit(t *testing.T) { AmountValue: 1000, } - proc.handleDeposit(ev) + proc.handleDeposit(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -57,7 +58,7 @@ func TestHandleDeposit(t *testing.T) { es.epochCounter = 109 - proc.handleDeposit(ev) + proc.handleDeposit(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -98,7 +99,7 @@ func TestHandleWithdraw(t *testing.T) { AmountValue: 1000, } - proc.handleWithdraw(ev) + proc.handleWithdraw(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -139,7 +140,7 @@ func TestHandleCheque(t *testing.T) { LockValue: util.Uint160{200}, } - proc.handleCheque(ev) + proc.handleCheque(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -176,7 +177,7 @@ func TestHandleConfig(t *testing.T) { TxHashValue: util.Uint256{100}, } - proc.handleConfig(ev) + proc.handleConfig(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -225,7 +226,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -241,17 +242,17 @@ type testBalaceClient struct { burn []balance.BurnPrm } -func (c *testBalaceClient) Mint(p balance.MintPrm) error { +func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error { c.mint = append(c.mint, p) return nil } -func (c *testBalaceClient) Lock(p balance.LockPrm) error { +func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error { c.lock = append(c.lock, p) return nil } -func (c *testBalaceClient) Burn(p balance.BurnPrm) error { +func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error { c.burn = append(c.burn, p) return nil } @@ -260,7 +261,7 @@ type testNetmapClient struct { config []nmClient.SetConfigPrm } -func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error { +func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error { c.config = 
append(c.config, p) return nil } diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index c72aeceee..d10eb9660 100644 --- a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -1,6 +1,8 @@ package frostfs import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" @@ -15,9 +17,9 @@ const ( // Process deposit event by invoking a balance contract and sending native // gas in the sidechain. -func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit) +func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit) return true } @@ -28,9 +30,9 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { prm.SetID(deposit.ID()) // send transferX to a balance contract - err := np.balanceClient.Mint(prm) + err := np.balanceClient.Mint(ctx, prm) if err != nil { - np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) } curEpoch := np.epochState.EpochCounter() @@ -44,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { val, ok := np.mintEmitCache.Get(receiver.String()) if ok && val+np.mintEmitThreshold >= curEpoch { - np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined, + np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined, zap.Stringer("receiver", receiver), zap.Uint64("last_emission", val), zap.Uint64("current_epoch", curEpoch)) @@ -56,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { // before gas transfer check if the balance is greater than the threshold balance, err := np.morphClient.GasBalance() if err != nil { - np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) return false } if balance < np.gasBalanceThreshold { - np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached, + np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached, zap.Int64("balance", balance), zap.Int64("threshold", np.gasBalanceThreshold)) @@ -70,8 +72,8 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { - np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver, - zap.String("error", err.Error())) + np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver, + zap.Error(err)) return false } @@ -82,16 +84,16 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { } // Process withdraw event by locking assets in the balance account. 
-func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw) +func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw) return true } // create lock account lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size]) if err != nil { - np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err)) return false } @@ -105,9 +107,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount())) prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime)) - err = np.balanceClient.Lock(prm) + err = np.balanceClient.Lock(ctx, prm) if err != nil { - np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) return false } @@ -116,9 +118,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { // Process cheque event by transferring assets from the lock account back to // the reserve account. -func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque) +func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque) return true } @@ -128,9 +130,9 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount())) prm.SetID(cheque.ID()) - err := np.balanceClient.Burn(prm) + err := np.balanceClient.Burn(ctx, prm) if err != nil { - np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go index 2ae3e6ced..dc579f6bb 100644 --- a/pkg/innerring/processors/frostfs/process_config.go +++ b/pkg/innerring/processors/frostfs/process_config.go @@ -1,6 +1,8 @@ package frostfs import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" @@ -9,9 +11,9 @@ import ( // Process config event by setting configuration value from the mainchain in // the sidechain. 
-func (np *Processor) processConfig(config frostfsEvent.Config) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig) +func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig) return true } @@ -22,9 +24,9 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool { prm.SetValue(config.Value()) prm.SetHash(config.TxHash()) - err := np.netmapClient.SetConfig(prm) + err := np.netmapClient.SetConfig(ctx, prm) if err != nil { - np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index 2019857ac..9d3bf65cd 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -1,11 +1,11 @@ package frostfs import ( + "context" "errors" "fmt" "sync" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -16,7 +16,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( @@ -27,7 +26,7 @@ type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // PrecisionConverter converts balance amount values. @@ -36,13 +35,13 @@ type ( } BalanceClient interface { - Mint(p balance.MintPrm) error - Lock(p balance.LockPrm) error - Burn(p balance.BurnPrm) error + Mint(ctx context.Context, p balance.MintPrm) error + Lock(ctx context.Context, p balance.LockPrm) error + Burn(ctx context.Context, p balance.BurnPrm) error } NetmapClient interface { - SetConfig(p nmClient.SetConfigPrm) error + SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error } MorphClient interface { @@ -110,8 +109,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/frostfs: balance precision converter is not set") } - p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err) @@ -145,70 +142,34 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. 
-func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var ( - parsers = make([]event.NotificationParserInfo, 0, 6) - - p event.NotificationParserInfo - ) - - p.SetScriptHash(np.frostfsContract) - - // deposit event - p.SetType(event.TypeFromString(depositNotification)) - p.SetParser(frostfsEvent.ParseDeposit) - parsers = append(parsers, p) - - // withdraw event - p.SetType(event.TypeFromString(withdrawNotification)) - p.SetParser(frostfsEvent.ParseWithdraw) - parsers = append(parsers, p) - - // cheque event - p.SetType(event.TypeFromString(chequeNotification)) - p.SetParser(frostfsEvent.ParseCheque) - parsers = append(parsers, p) - - // config event - p.SetType(event.TypeFromString(configNotification)) - p.SetParser(frostfsEvent.ParseConfig) - parsers = append(parsers, p) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var ( - handlers = make([]event.NotificationHandlerInfo, 0, 6) - - h event.NotificationHandlerInfo - ) - - h.SetScriptHash(np.frostfsContract) - - // deposit handler - h.SetType(event.TypeFromString(depositNotification)) - h.SetHandler(np.handleDeposit) - handlers = append(handlers, h) - - // withdraw handler - h.SetType(event.TypeFromString(withdrawNotification)) - h.SetHandler(np.handleWithdraw) - handlers = append(handlers, h) - - // cheque handler - h.SetType(event.TypeFromString(chequeNotification)) - h.SetHandler(np.handleCheque) - handlers = append(handlers, h) - - // config handler - h.SetType(event.TypeFromString(configNotification)) - h.SetHandler(np.handleConfig) - handlers = append(handlers, h) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: np.frostfsContract, + Type: event.TypeFromString(depositNotification), + Parser: frostfsEvent.ParseDeposit, + Handlers: []event.Handler{np.handleDeposit}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(withdrawNotification), + Parser: frostfsEvent.ParseWithdraw, + Handlers: []event.Handler{np.handleWithdraw}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(chequeNotification), + Parser: frostfsEvent.ParseCheque, + Handlers: []event.Handler{np.handleCheque}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(configNotification), + Parser: frostfsEvent.ParseConfig, + Handlers: []event.Handler{np.handleConfig}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. 
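The frostfs processor above now registers parser and handlers together through plain event.NotificationHandlerInfo literals, which is why the separate ListenerNotificationParsers method could be dropped. A minimal sketch of the new registration style, assuming the unexported depositNotification constant resolves to the "Deposit" notification name; logDeposit is a hypothetical handler, not part of the patch:

    package example

    import (
    	"context"

    	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
    	frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
    	"github.com/nspcc-dev/neo-go/pkg/util"
    )

    // logDeposit is a hypothetical handler; note the context.Context first
    // argument that the reworked event.Handler type now carries.
    func logDeposit(_ context.Context, e event.Event) {
    	_ = e.(frostfsEvent.Deposit) // produced by frostfsEvent.ParseDeposit below
    }

    // subscriptions bundles contract hash, event type, parser and handlers in
    // one literal per notification, replacing the old setter-based pairing of
    // NotificationParserInfo and NotificationHandlerInfo.
    func subscriptions(contract util.Uint160) []event.NotificationHandlerInfo {
    	return []event.NotificationHandlerInfo{{
    		Contract: contract,
    		Type:     event.TypeFromString("Deposit"), // assumed notification name
    		Parser:   frostfsEvent.ParseDeposit,
    		Handlers: []event.Handler{logDeposit},
    	}}
    }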
diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go index fd7f539c3..7e8ab629d 100644 --- a/pkg/innerring/processors/governance/handlers.go +++ b/pkg/innerring/processors/governance/handlers.go @@ -1,6 +1,8 @@ package governance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -11,7 +13,7 @@ import ( "go.uber.org/zap" ) -func (gp *Processor) HandleAlphabetSync(e event.Event) { +func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) { var ( typ string hash util.Uint256 @@ -32,16 +34,16 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) { return } - gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ)) + gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ)) // send event to the worker pool err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool { - return gp.processAlphabetSync(hash) + return gp.processAlphabetSync(ctx, hash) }) if err != nil { // there system can be moved into controlled degradation stage - gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained, + gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained, zap.Int("capacity", gp.pool.Cap())) } } diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 2a505f8d1..864c5da67 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -1,6 +1,7 @@ package governance import ( + "context" "encoding/binary" "sort" "testing" @@ -8,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" @@ -38,7 +38,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { alphabetKeys: testKeys.mainnetKeys, } f := &testFrostFSClient{} - nm := &testNetmapClient{} proc, err := New( &Params{ @@ -50,7 +49,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { MorphClient: m, MainnetClient: mn, FrostFSClient: f, - NetmapClient: nm, }, ) @@ -60,7 +58,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { txHash: util.Uint256{100}, } - proc.HandleAlphabetSync(ev) + proc.HandleAlphabetSync(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -73,10 +71,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { }, }, v.votes, "invalid vote calls") - var irUpdateExp []nmClient.UpdateIRPrm - - require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates") - var expAlphabetUpdate client.UpdateAlphabetListPrm expAlphabetUpdate.SetHash(ev.txHash) expAlphabetUpdate.SetList(testKeys.newInnerRingExp) @@ -119,7 +113,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { alphabetKeys: testKeys.mainnetKeys, } f := &testFrostFSClient{} - nm := &testNetmapClient{} proc, err := New( &Params{ @@ -131,7 +124,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { MorphClient: m, MainnetClient: mn, FrostFSClient: f, - NetmapClient: nm, }, ) @@ -142,7 +134,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { Role: 
noderoles.NeoFSAlphabet, } - proc.HandleAlphabetSync(ev) + proc.HandleAlphabetSync(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -155,9 +147,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { }, }, v.votes, "invalid vote calls") - var irUpdatesExp []nmClient.UpdateIRPrm - require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates") - var alpabetUpdExp client.UpdateAlphabetListPrm alpabetUpdExp.SetList(testKeys.newInnerRingExp) alpabetUpdExp.SetHash(ev.TxHash) @@ -190,7 +179,7 @@ func generateTestKeys(t *testing.T) testKeys { for { var result testKeys - for i := 0; i < 4; i++ { + for range 4 { pk, err := keys.NewPrivateKey() require.NoError(t, err, "failed to create private key") result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey()) @@ -230,7 +219,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -238,7 +227,7 @@ type testVoter struct { votes []VoteValidatorPrm } -func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error { +func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error { v.votes = append(v.votes, prm) return nil } @@ -247,7 +236,7 @@ type testIRFetcher struct { publicKeys keys.PublicKeys } -func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { return f.publicKeys, nil } @@ -262,12 +251,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) { return c.commiteeKeys, nil } -func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error { +func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error { c.alphabetUpdates = append(c.alphabetUpdates, prm) return nil } -func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error { +func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error { c.notaryUpdates = append(c.notaryUpdates, prm) return nil } @@ -277,7 +266,7 @@ type testMainnetClient struct { designateHash util.Uint160 } -func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) { +func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) { return c.alphabetKeys, nil } @@ -289,16 +278,7 @@ type testFrostFSClient struct { updates []frostfscontract.AlphabetUpdatePrm } -func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error { - c.updates = append(c.updates, p) - return nil -} - -type testNetmapClient struct { - updates []nmClient.UpdateIRPrm -} - -func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error { +func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error { c.updates = append(c.updates, p) return nil } diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go index d099ec837..4ecebf05b 100644 --- a/pkg/innerring/processors/governance/list_test.go +++ b/pkg/innerring/processors/governance/list_test.go @@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) { } ln := len(rounds) - for i := 0; i < ln; i++ { + for i := range ln { list, err = newAlphabetList(list, exp) require.NoError(t, err) require.True(t, equalPublicKeyLists(list, rounds[i])) @@ 
-131,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) { func generateKeys(n int) (keys.PublicKeys, error) { pubKeys := make(keys.PublicKeys, 0, n) - for i := 0; i < n; i++ { + for range n { privKey, err := keys.NewPrivateKey() if err != nil { return nil, err diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 50ba58e77..6e22abb3c 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -1,6 +1,7 @@ package governance import ( + "context" "encoding/binary" "encoding/hex" "sort" @@ -18,39 +19,39 @@ const ( alphabetUpdateIDPrefix = "AlphabetUpdate" ) -func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool { - if !gp.alphabetState.IsAlphabet() { - gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) +func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool { + if !gp.alphabetState.IsAlphabet(ctx) { + gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) return true } - mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() + mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx) if err != nil { - gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet, + zap.Error(err)) return false } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { - gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain, + zap.Error(err)) return false } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { - gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, + zap.Error(err)) return false } if newAlphabet == nil { - gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) + gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) return true } - gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, + gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)), zap.String("new_alphabet", prettyKeys(newAlphabet)), ) @@ -61,22 +62,22 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool { } // 1. Vote to sidechain committee via alphabet contracts. - err = gp.voter.VoteForSidechainValidator(votePrm) + err = gp.voter.VoteForSidechainValidator(ctx, votePrm) if err != nil { - gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee, + zap.Error(err)) } // 2. Update NeoFSAlphabet role in the sidechain. - gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash) + gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash) // 3. Update notary role in the sidechain. - gp.updateNotaryRoleInSidechain(newAlphabet, txHash) + gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash) // 4. Update FrostFS contract in the mainnet. 
- gp.updateFrostFSContractInMainnet(newAlphabet) + gp.updateFrostFSContractInMainnet(ctx, newAlphabet) - gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate) + gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate) return true } @@ -93,24 +94,24 @@ func prettyKeys(keys keys.PublicKeys) string { return strings.TrimRight(sb.String(), delimiter) } -func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { - innerRing, err := gp.irFetcher.InnerRingKeys() +func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { + innerRing, err := gp.irFetcher.InnerRingKeys(ctx) if err != nil { - gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain, + zap.Error(err)) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { - gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, + zap.Error(err)) return } sort.Sort(newInnerRing) - gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList, + gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList, zap.String("before", prettyKeys(innerRing)), zap.String("after", prettyKeys(newInnerRing)), ) @@ -119,26 +120,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl updPrm.SetList(newInnerRing) updPrm.SetHash(txHash) - if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil { - gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, - zap.String("error", err.Error())) + if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil { + gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, + zap.Error(err)) } } -func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) { +func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) { updPrm := client.UpdateNotaryListPrm{} updPrm.SetList(newAlphabet) updPrm.SetHash(txHash) - err := gp.morphClient.UpdateNotaryList(updPrm) + err := gp.morphClient.UpdateNotaryList(ctx, updPrm) if err != nil { - gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, + zap.Error(err)) } } -func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) { +func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) { epoch := gp.epochState.EpochCounter() buf := make([]byte, 8) @@ -151,9 +152,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) prm.SetID(id) prm.SetPubs(newAlphabet) - err := gp.frostfsClient.AlphabetUpdate(prm) + err := gp.frostfsClient.AlphabetUpdate(ctx, prm) if err != nil { - gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, - zap.String("error", err.Error())) + gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, + zap.Error(err)) } } diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index fa267eade..2d131edda 100644 --- 
a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -1,6 +1,7 @@ package governance import ( + "context" "errors" "fmt" @@ -25,7 +26,7 @@ const ProcessorPoolSize = 1 type ( // AlphabetState is a callback interface for innerring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } ) @@ -38,7 +39,7 @@ type VoteValidatorPrm struct { // Voter is a callback interface for alphabet contract voting. type Voter interface { - VoteForSidechainValidator(VoteValidatorPrm) error + VoteForSidechainValidator(context.Context, VoteValidatorPrm) error } type ( @@ -51,11 +52,11 @@ type ( // Implementation must take into account availability of // the notary contract. IRFetcher interface { - InnerRingKeys() (keys.PublicKeys, error) + InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) } FrostFSClient interface { - AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error + AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error } NetmapClient interface { @@ -63,14 +64,14 @@ type ( } MainnetClient interface { - NeoFSAlphabetList() (res keys.PublicKeys, err error) + NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) GetDesignateHash() util.Uint160 } MorphClient interface { Committee() (res keys.PublicKeys, err error) - UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error - UpdateNotaryList(prm client.UpdateNotaryListPrm) error + UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error + UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error } // Processor of events related to governance in the network. @@ -79,7 +80,6 @@ type ( metrics metrics.Register pool *ants.Pool frostfsClient FrostFSClient - netmapClient NetmapClient alphabetState AlphabetState epochState EpochState @@ -105,7 +105,6 @@ type ( MorphClient MorphClient MainnetClient MainnetClient FrostFSClient FrostFSClient - NetmapClient NetmapClient } ) @@ -146,7 +145,6 @@ func New(p *Params) (*Processor, error) { metrics: metricsRegister, pool: pool, frostfsClient: p.FrostFSClient, - netmapClient: p.NetmapClient, alphabetState: p.AlphabetState, epochState: p.EpochState, voter: p.Voter, @@ -157,22 +155,16 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var pi event.NotificationParserInfo - pi.SetScriptHash(gp.designate) - pi.SetType(event.TypeFromString(native.DesignationEventName)) - pi.SetParser(rolemanagement.ParseDesignate) - return []event.NotificationParserInfo{pi} -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var hi event.NotificationHandlerInfo - hi.SetScriptHash(gp.designate) - hi.SetType(event.TypeFromString(native.DesignationEventName)) - hi.SetHandler(gp.HandleAlphabetSync) - return []event.NotificationHandlerInfo{hi} + return []event.NotificationHandlerInfo{ + { + Contract: gp.designate, + Type: event.TypeFromString(native.DesignationEventName), + Parser: rolemanagement.ParseDesignate, + Handlers: []event.Handler{gp.HandleAlphabetSync}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. 
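The governance rework above repeats the pattern used in the frostfs processor: the listener hands the handler a context, the handler captures it in the closure submitted to the worker pool, and every downstream client call receives it, so tracing and cancellation survive the hop onto the pool. A self-contained sketch of that flow, with illustrative names only (the real code uses VoteValidatorPrm and the ants pool):

    package example

    import (
    	"context"
    	"log"
    )

    type submitter interface {
    	Submit(task func()) error
    }

    type voter interface {
    	// Side-chain calls now take a context as their first argument.
    	Vote(ctx context.Context, keys []string) error
    }

    type proc struct {
    	pool  submitter
    	voter voter
    }

    // handleSync mirrors HandleAlphabetSync: ctx is captured by the closure,
    // so work executed later on the pool still belongs to the originating
    // notification; a drained pool is logged and tolerated, not fatal.
    func (p *proc) handleSync(ctx context.Context, keys []string) {
    	err := p.pool.Submit(func() {
    		if err := p.voter.Vote(ctx, keys); err != nil {
    			log.Println("vote failed:", err)
    		}
    	})
    	if err != nil {
    		log.Println("worker pool drained")
    	}
    }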
diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go index c18611569..abd5b089a 100644 --- a/pkg/innerring/processors/netmap/cleanup_table.go +++ b/pkg/innerring/processors/netmap/cleanup_table.go @@ -60,7 +60,7 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) { } access.binNodeInfo = binNodeInfo - access.maintenance = nmNodes[i].IsMaintenance() + access.maintenance = nmNodes[i].Status().IsMaintenance() newMap[keyString] = access } diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go index ae5620733..208bd5496 100644 --- a/pkg/innerring/processors/netmap/cleanup_table_test.go +++ b/pkg/innerring/processors/netmap/cleanup_table_test.go @@ -127,7 +127,7 @@ func TestCleanupTable(t *testing.T) { t.Run("skip maintenance nodes", func(t *testing.T) { cnt := 0 - infos[1].SetMaintenance() + infos[1].SetStatus(netmap.Maintenance) key := netmap.StringifyPublicKey(infos[1]) c.update(networkMap, 5) diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go index c6053e281..4c7199a49 100644 --- a/pkg/innerring/processors/netmap/handlers.go +++ b/pkg/innerring/processors/netmap/handlers.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -11,93 +12,93 @@ import ( "go.uber.org/zap" ) -func (np *Processor) HandleNewEpochTick(ev event.Event) { +func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) { _ = ev.(timerEvent.NewEpochTick) - np.log.Info(logs.NetmapTick, zap.String("type", "epoch")) + np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch")) // send an event to the worker pool - err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick) + err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleNewEpoch(ev event.Event) { +func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) { epochEvent := ev.(netmapEvent.NewEpoch) - np.log.Info(logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "new epoch"), zap.Uint64("value", epochEvent.EpochNumber())) // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool { - return np.processNewEpoch(epochEvent) + return np.processNewEpoch(ctx, epochEvent) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleAddPeer(ev event.Event) { +func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) { newPeer := ev.(netmapEvent.AddPeer) - np.log.Info(logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "add peer"), ) // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool { - return np.processAddPeer(newPeer) + return np.processAddPeer(ctx, newPeer) }) if err != nil { // there system can be moved into controlled degradation stage 
- np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleUpdateState(ev event.Event) { +func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) { updPeer := ev.(netmapEvent.UpdatePeer) - np.log.Info(logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "update peer state"), zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes()))) // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool { - return np.processUpdatePeer(updPeer) + return np.processUpdatePeer(ctx, updPeer) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleCleanupTick(ev event.Event) { +func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) { if !np.netmapSnapshot.enabled { - np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518) + np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518) return } cleanup := ev.(netmapCleanupTick) - np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner")) + np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner")) // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool { - return np.processNetmapCleanupTick(cleanup) + return np.processNetmapCleanupTick(ctx, cleanup) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index b34abb78c..934c3790d 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -1,19 +1,19 @@ package netmap import ( + "context" "fmt" "testing" "time" - v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -39,7 +39,7 @@ func TestNewEpochTick(t *testing.T) { require.NoError(t, err, "failed to create processor") ev := timerEvent.NewEpochTick{} - proc.HandleNewEpochTick(ev) + proc.HandleNewEpochTick(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -68,7 +68,6 @@ func TestNewEpoch(t *testing.T) { duration: 10, } r := &testEpochResetter{} - cc := &testContainerClient{} nc := &testNetmapClient{ epochDuration: 20, txHeights: 
map[util.Uint256]uint32{ @@ -82,7 +81,6 @@ func TestNewEpoch(t *testing.T) { p.NotaryDepositHandler = eh.Handle p.AlphabetSyncHandler = eh.Handle p.NetmapClient = nc - p.ContainerWrapper = cc p.EpochTimer = r p.EpochState = es }) @@ -93,7 +91,7 @@ func TestNewEpoch(t *testing.T) { Num: 101, Hash: util.Uint256{101}, } - proc.handleNewEpoch(ev) + proc.handleNewEpoch(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -103,11 +101,6 @@ func TestNewEpoch(t *testing.T) { require.Equal(t, ev.Num, es.counter, "invalid epoch counter") require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets") - var expEstimation cntClient.StartEstimationPrm - expEstimation.SetEpoch(ev.Num - 1) - expEstimation.SetHash(ev.Hash) - require.EqualValues(t, []cntClient.StartEstimationPrm{expEstimation}, cc.estimations, "invalid estimations") - require.EqualValues(t, []event.Event{ governance.NewSyncEvent(ev.TxHash()), ev, @@ -138,7 +131,7 @@ func TestAddPeer(t *testing.T) { MainTransaction: &transaction.Transaction{}, }, } - proc.handleAddPeer(ev) + proc.handleAddPeer(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -146,14 +139,14 @@ func TestAddPeer(t *testing.T) { require.Nil(t, nc.notaryInvokes, "invalid notary invokes") - node.SetOnline() + node.SetStatus(netmap.Online) ev = netmapEvent.AddPeer{ NodeBytes: node.Marshal(), Request: &payload.P2PNotaryRequest{ MainTransaction: &transaction.Transaction{}, }, } - proc.handleAddPeer(ev) + proc.handleAddPeer(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -196,7 +189,7 @@ func TestUpdateState(t *testing.T) { MainTransaction: &transaction.Transaction{}, }, } - proc.handleUpdateState(ev) + proc.handleUpdateState(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -240,7 +233,7 @@ func TestCleanupTick(t *testing.T) { txHash: util.Uint256{123}, } - proc.handleCleanupTick(ev) + proc.handleCleanupTick(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -274,7 +267,6 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) { as := &testAlphabetState{ isAlphabet: true, } - cc := &testContainerClient{} nc := &testNetmapClient{} eh := &testEventHandler{} @@ -288,7 +280,6 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) { EpochState: es, EpochTimer: r, AlphabetState: as, - ContainerWrapper: cc, NetmapClient: nc, NotaryDepositHandler: eh.Handle, AlphabetSyncHandler: eh.Handle, @@ -303,7 +294,7 @@ type testNodeStateSettings struct { maintAllowed bool } -func (s *testNodeStateSettings) MaintenanceModeAllowed() error { +func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error { if s.maintAllowed { return nil } @@ -312,7 +303,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed() error { type testValidator struct{} -func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error { +func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error { return nil } @@ -350,19 +341,10 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } -type testContainerClient struct { - estimations []cntClient.StartEstimationPrm -} - -func (c *testContainerClient) StartEstimation(p cntClient.StartEstimationPrm) error { - 
c.estimations = append(c.estimations, p) - return nil -} - type notaryInvoke struct { contract util.Uint160 fee fixedn.Fixed8 @@ -383,7 +365,7 @@ type testNetmapClient struct { invokedTxs []*transaction.Transaction } -func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { +func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{ contract: contract, fee: fee, @@ -399,7 +381,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testNetmapClient) EpochDuration() (uint64, error) { +func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) { return c.epochDuration, nil } @@ -410,11 +392,11 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) { return 0, fmt.Errorf("not found") } -func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { return c.netmap, nil } -func (c *testNetmapClient) NewEpoch(epoch uint64) error { +func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error { c.newEpochs = append(c.newEpochs, epoch) return nil } @@ -432,6 +414,6 @@ type testEventHandler struct { handledEvents []event.Event } -func (h *testEventHandler) Handle(e event.Event) { +func (h *testEventHandler) Handle(_ context.Context, e event.Event) { h.handledEvents = append(h.handledEvents, e) } diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go index d071a7792..b81dc9989 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go @@ -1,10 +1,11 @@ package locode import ( + "context" "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" + "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -29,7 +30,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record // - Continent: R.Continent().String(). // // UN-LOCODE attribute remains untouched. 
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { attrLocode := n.LOCODE() if attrLocode == "" { return nil diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go index 6697391e8..fa2dd1ac1 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go @@ -1,13 +1,14 @@ package locode_test import ( + "context" "errors" "fmt" "testing" + locodestd "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" + locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode" - locodestd "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/stretchr/testify/require" ) @@ -92,7 +93,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { t.Run("w/o locode", func(t *testing.T) { n := nodeInfoWithSomeAttrs() - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.NoError(t, err) }) @@ -102,7 +103,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttrValue(n, "WRONG LOCODE") - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.Error(t, err) }) @@ -111,7 +112,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"}) - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.Error(t, err) }) @@ -119,7 +120,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, r.LOCODE) - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.NoError(t, err) require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode")) diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go index e6332261e..ba5db9205 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go @@ -1,45 +1,45 @@ package locode import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" + "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" + locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" ) // Record is an interface of read-only // FrostFS LOCODE database single entry. type Record interface { - // Must return ISO 3166-1 alpha-2 + // CountryCode must return ISO 3166-1 alpha-2 // country code. // // Must not return nil. CountryCode() *locodedb.CountryCode - // Must return English short country name + // CountryName must return English short country name // officially used by the ISO 3166 // Maintenance Agency (ISO 3166/MA). CountryName() string - // Must return UN/LOCODE 3-character code + // LocationCode must return UN/LOCODE 3-character code // for the location (numerals 2-9 may also // be used). // // Must not return nil. 
LocationCode() *locodedb.LocationCode - // Must return name of the location which + // LocationName must return name of the location which // have been allocated a UN/LOCODE without // diacritic sign. LocationName() string - // Must return ISO 1-3 character alphabetic + // SubDivCode must return ISO 1-3 character alphabetic // and/or numeric code for the administrative // division of the country concerned. SubDivCode() string - // Must return subdivision name. + // SubDivName must return subdivision name. SubDivName() string - // Must return existing continent where is + // Continent must return existing continent where is // the location. // // Must not return nil. @@ -49,7 +49,7 @@ type Record interface { // DB is an interface of read-only // FrostFS LOCODE database. type DB interface { - // Must find the record that corresponds to + // Get must find the record that corresponds to // LOCODE and provides the Record interface. // // Must return an error if Record is nil. diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go index 126f36582..0e4628ac7 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go @@ -1,6 +1,7 @@ package maddress import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" @@ -8,7 +9,7 @@ import ( ) // VerifyAndUpdate calls network.VerifyAddress. -func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { err := network.VerifyMultiAddress(*n) if err != nil { return fmt.Errorf("could not verify multiaddress: %w", err) diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go index 4094e50a5..03c41a451 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go @@ -7,6 +7,7 @@ map candidates. package state import ( + "context" "errors" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -23,7 +24,7 @@ type NetworkSettings interface { // no error if allowed; // ErrMaintenanceModeDisallowed if disallowed; // other error if there are any problems with the check. - MaintenanceModeAllowed() error + MaintenanceModeAllowed(ctx context.Context) error } // NetMapCandidateValidator represents tool which checks state of nodes which @@ -55,13 +56,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting // MUST NOT be called before SetNetworkSettings. // // See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
-func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error { - if node.IsOnline() { +func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error { + if node.Status().IsOnline() { return nil } - if node.IsMaintenance() { - return x.netSettings.MaintenanceModeAllowed() + if node.Status().IsMaintenance() { + return x.netSettings.MaintenanceModeAllowed(ctx) } return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE") diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go index a557628f0..cbf48a710 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go @@ -1,6 +1,7 @@ package state_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -13,7 +14,7 @@ type testNetworkSettings struct { disallowed bool } -func (x testNetworkSettings) MaintenanceModeAllowed() error { +func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error { if x.disallowed { return state.ErrMaintenanceModeDisallowed } @@ -41,22 +42,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { }, { name: "ONLINE", - preparer: (*netmap.NodeInfo).SetOnline, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) }, valid: true, }, { name: "OFFLINE", - preparer: (*netmap.NodeInfo).SetOffline, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) }, valid: false, }, { name: "MAINTENANCE/allowed", - preparer: (*netmap.NodeInfo).SetMaintenance, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }, valid: true, }, { name: "MAINTENANCE/disallowed", - preparer: (*netmap.NodeInfo).SetMaintenance, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }, valid: false, validatorPreparer: func(v *state.NetMapCandidateValidator) { var s testNetworkSettings @@ -81,7 +82,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { testCase.validatorPreparer(&v) } - err := v.VerifyAndUpdate(&node) + err := v.VerifyAndUpdate(context.Background(), &node) if testCase.valid { require.NoError(t, err, testCase.name) diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go index e9b24e024..3dbe98a8d 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go @@ -1,6 +1,8 @@ package nodevalidation import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -26,9 +28,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator { // VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators. // // If error appears, returns it immediately. 
-func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error { +func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error { for _, v := range c.validators { - if err := v.VerifyAndUpdate(ni); err != nil { + if err := v.VerifyAndUpdate(ctx, ni); err != nil { return err } } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index 170c39e2c..8f8cc17ff 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -1,15 +1,17 @@ package netmap import ( - v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.uber.org/zap" ) -func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) +func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) return true } @@ -17,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error { key, err := keys.NewPublicKeyFromString(s) if err != nil { - np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode, + np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode, zap.String("key", s)) return nil } - np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) + np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) // In notary environments we call UpdateStateIR method instead of UpdateState. // It differs from UpdateState only by name, so we can do this in the same form. @@ -31,6 +33,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { const methodUpdateStateNotary = "updateStateIR" err = np.netmapClient.MorphNotaryInvoke( + ctx, np.netmapClient.ContractAddress(), 0, uint32(ev.epoch), @@ -39,14 +42,14 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { int64(v2netmap.Offline), key.Bytes(), ) if err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) } return nil }) if err != nil { - np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache, - zap.String("error", err.Error())) + np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache, + zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index f3cb9837f..7c78d24a5 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -1,22 +1,23 @@ package netmap import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "go.uber.org/zap" ) // Process new epoch notification by setting global epoch value and resetting // local epoch timer. 
-func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { +func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool { epoch := ev.EpochNumber() - epochDuration, err := np.netmapClient.EpochDuration() + epochDuration, err := np.netmapClient.EpochDuration(ctx) if err != nil { - np.log.Warn(logs.NetmapCantGetEpochDuration, - zap.String("error", err.Error())) + np.log.Warn(ctx, logs.NetmapCantGetEpochDuration, + zap.Error(err)) } else { np.epochState.SetEpochDuration(epochDuration) } @@ -25,61 +26,46 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { h, err := np.netmapClient.MorphTxHeight(ev.TxHash()) if err != nil { - np.log.Warn(logs.NetmapCantGetTransactionHeight, + np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight, zap.String("hash", ev.TxHash().StringLE()), - zap.String("error", err.Error())) + zap.Error(err)) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { - np.log.Warn(logs.NetmapCantResetEpochTimer, - zap.String("error", err.Error())) + np.log.Warn(ctx, logs.NetmapCantResetEpochTimer, + zap.Error(err)) } // get new netmap snapshot - networkMap, err := np.netmapClient.NetMap() + networkMap, err := np.netmapClient.NetMap(ctx) if err != nil { - np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup, - zap.String("error", err.Error())) + np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup, + zap.Error(err)) return false } - prm := cntClient.StartEstimationPrm{} - - prm.SetEpoch(epoch - 1) - prm.SetHash(ev.TxHash()) - - if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch - err = np.containerWrp.StartEstimation(prm) - - if err != nil { - np.log.Warn(logs.NetmapCantStartContainerSizeEstimation, - zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) - } - } - np.netmapSnapshot.update(*networkMap, epoch) - np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()}) - np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash())) - np.handleNotaryDeposit(ev) + np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()}) + np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash())) + np.handleNotaryDeposit(ctx, ev) return true } // Process new epoch tick by invoking new epoch method in network map contract. 
-func (np *Processor) processNewEpochTick() bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick) +func (np *Processor) processNewEpochTick(ctx context.Context) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick) return true } nextEpoch := np.epochState.EpochCounter() + 1 - np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) + np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) - err := np.netmapClient.NewEpoch(nextEpoch) + err := np.netmapClient.NewEpoch(ctx, nextEpoch) if err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 41e4bfb7e..b5c727cc7 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -12,9 +13,9 @@ import ( // Process add peer notification by sanity check of new node // local epoch timer. -func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) +func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) return true } @@ -22,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { tx := ev.NotaryRequest().MainTransaction ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers) if err != nil || !ok { - np.log.Warn(logs.NetmapNonhaltNotaryTransaction, + np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction, zap.String("method", "netmap.AddPeer"), zap.String("hash", tx.Hash().StringLE()), zap.Error(err)) @@ -33,15 +34,15 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { var nodeInfo netmap.NodeInfo if err := nodeInfo.Unmarshal(ev.Node()); err != nil { // it will be nice to have tx id at event structure to log it - np.log.Warn(logs.NetmapCantParseNetworkMapCandidate) + np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate) return false } // validate and update node info - err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) + err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo) if err != nil { - np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, - zap.String("error", err.Error()), + np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, + zap.Error(err), ) return false @@ -62,8 +63,8 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // But there is no guarantee that code will be executed in the same order. // That is why we need to perform `addPeerIR` only in case when node is online, // because in scope of this method, contract set state `ONLINE` for the node. 
- if updated && nodeInfo.IsOnline() { - np.log.Info(logs.NetmapApprovingNetworkMapCandidate, + if updated && nodeInfo.Status().IsOnline() { + np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate, zap.String("key", keyString)) prm := netmapclient.AddPeerPrm{} @@ -76,6 +77,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // create new notary request with the original nonce err = np.netmapClient.MorphNotaryInvoke( + ctx, np.netmapClient.ContractAddress(), 0, ev.NotaryRequest().MainTransaction.Nonce, @@ -83,9 +85,8 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { methodAddPeerNotary, nodeInfoBinary, ) - if err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) return false } } @@ -94,9 +95,9 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { } // Process update peer notification by sending approval tx to the smart contract. -func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) +func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) return true } @@ -107,9 +108,9 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { var err error if ev.Maintenance() { - err = np.nodeStateSettings.MaintenanceModeAllowed() + err = np.nodeStateSettings.MaintenanceModeAllowed(ctx) if err != nil { - np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState, + np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), ) @@ -118,7 +119,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { } if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index e8fb8721b..277bca1c3 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -1,13 +1,12 @@ package netmap import ( + "context" "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -17,7 +16,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( @@ -36,14 +34,14 @@ type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // NodeValidator wraps basic method of checking the correctness // of information about the node and its finalization for adding // to the network map. 
NodeValidator interface { - // Must verify and optionally update NodeInfo structure. + // VerifyAndUpdate must verify and optionally update NodeInfo structure. // // Must return an error if NodeInfo input is invalid. // Must return an error if it is not possible to correctly @@ -51,24 +49,20 @@ type ( // // If no error occurs, the parameter must point to the // ready-made NodeInfo structure. - VerifyAndUpdate(*netmap.NodeInfo) error + VerifyAndUpdate(context.Context, *netmap.NodeInfo) error } Client interface { - MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error + MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error ContractAddress() util.Uint160 - EpochDuration() (uint64, error) + EpochDuration(ctx context.Context) (uint64, error) MorphTxHeight(h util.Uint256) (res uint32, err error) - NetMap() (*netmap.NetMap, error) - NewEpoch(epoch uint64) error + NetMap(ctx context.Context) (*netmap.NetMap, error) + NewEpoch(ctx context.Context, epoch uint64) error MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error } - ContainerClient interface { - StartEstimation(p cntClient.StartEstimationPrm) error - } - // Processor of events produced by network map contract // and new epoch ticker, because it is related to contract. Processor struct { @@ -80,7 +74,6 @@ type ( alphabetState AlphabetState netmapClient Client - containerWrp ContainerClient netmapSnapshot cleanupTable @@ -103,7 +96,6 @@ type ( AlphabetState AlphabetState CleanupEnabled bool CleanupThreshold uint64 // in epochs - ContainerWrapper ContainerClient AlphabetSyncHandler event.Handler NotaryDepositHandler event.Handler @@ -133,16 +125,12 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: alphabet sync handler is not set") case p.NotaryDepositHandler == nil: return nil, errors.New("ir/netmap: notary deposit handler is not set") - case p.ContainerWrapper == nil: - return nil, errors.New("ir/netmap: container contract wrapper is not set") case p.NodeValidator == nil: return nil, errors.New("ir/netmap: node validator is not set") case p.NodeStateSettings == nil: return nil, errors.New("ir/netmap: node state settings is not set") } - p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err) @@ -161,7 +149,6 @@ func New(p *Params) (*Processor, error) { epochState: p.EpochState, alphabetState: p.AlphabetState, netmapClient: p.NetmapClient, - containerWrp: p.ContainerWrapper, netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold), handleAlphabetSync: p.AlphabetSyncHandler, @@ -174,36 +161,16 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - parsers := make([]event.NotificationParserInfo, 0, 3) - - var p event.NotificationParserInfo - - p.SetScriptHash(np.netmapClient.ContractAddress()) - - // new epoch event - p.SetType(newEpochNotification) - p.SetParser(netmapEvent.ParseNewEpoch) - parsers = append(parsers, p) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. 
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - handlers := make([]event.NotificationHandlerInfo, 0, 3) - - var i event.NotificationHandlerInfo - - i.SetScriptHash(np.netmapClient.ContractAddress()) - - // new epoch handler - i.SetType(newEpochNotification) - i.SetHandler(np.handleNewEpoch) - handlers = append(handlers, i) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: np.netmapClient.ContractAddress(), + Type: newEpochNotification, + Parser: netmapEvent.ParseNewEpoch, + Handlers: []event.Handler{np.handleNewEpoch}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go index e75fdaf40..310f12248 100644 --- a/pkg/innerring/processors/netmap/wrappers.go +++ b/pkg/innerring/processors/netmap/wrappers.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -18,13 +20,13 @@ type netmapClientWrapper struct { netmapClient *netmapclient.Client } -func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error { - _, err := w.netmapClient.UpdatePeerState(p) +func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error { + _, err := w.netmapClient.UpdatePeerState(ctx, p) return err } -func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { - _, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...) +func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { + _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...) 
return err } @@ -32,28 +34,28 @@ func (w *netmapClientWrapper) ContractAddress() util.Uint160 { return w.netmapClient.ContractAddress() } -func (w *netmapClientWrapper) EpochDuration() (uint64, error) { - return w.netmapClient.EpochDuration() +func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) { + return w.netmapClient.EpochDuration(ctx) } func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) { return w.netmapClient.Morph().TxHeight(h) } -func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) { - return w.netmapClient.NetMap() +func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) { + return w.netmapClient.NetMap(ctx) } -func (w *netmapClientWrapper) NewEpoch(epoch uint64) error { - return w.netmapClient.NewEpoch(epoch) +func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error { + return w.netmapClient.NewEpoch(ctx, epoch) } func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { return w.netmapClient.Morph().IsValidScript(script, signers) } -func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error { - return w.netmapClient.AddPeer(p) +func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error { + return w.netmapClient.AddPeer(ctx, p) } func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error { diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 1616dbb9f..0ef771359 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "sort" @@ -47,21 +48,21 @@ func (s *Server) SetEpochDuration(val uint64) { } // IsActive is a getter for a global active flag state. -func (s *Server) IsActive() bool { - return s.InnerRingIndex() >= 0 +func (s *Server) IsActive(ctx context.Context) bool { + return s.InnerRingIndex(ctx) >= 0 } // IsAlphabet is a getter for a global alphabet flag state. -func (s *Server) IsAlphabet() bool { - return s.AlphabetIndex() >= 0 +func (s *Server) IsAlphabet(ctx context.Context) bool { + return s.AlphabetIndex(ctx) >= 0 } // InnerRingIndex is a getter for a global index of node in inner ring list. Negative // index means that node is not in the inner ring list. -func (s *Server) InnerRingIndex() int { - index, err := s.statusIndex.InnerRingIndex() +func (s *Server) InnerRingIndex(ctx context.Context) int { + index, err := s.statusIndex.InnerRingIndex(ctx) if err != nil { - s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err)) return -1 } @@ -70,10 +71,10 @@ func (s *Server) InnerRingIndex() int { // InnerRingSize is a getter for a global size of inner ring list. This value // paired with inner ring index. -func (s *Server) InnerRingSize() int { - size, err := s.statusIndex.InnerRingSize() +func (s *Server) InnerRingSize(ctx context.Context) int { + size, err := s.statusIndex.InnerRingSize(ctx) if err != nil { - s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err)) return 0 } @@ -82,28 +83,28 @@ func (s *Server) InnerRingSize() int { // AlphabetIndex is a getter for a global index of node in alphabet list. // Negative index means that node is not in the alphabet list. 
-func (s *Server) AlphabetIndex() int { - index, err := s.statusIndex.AlphabetIndex() +func (s *Server) AlphabetIndex(ctx context.Context) int { + index, err := s.statusIndex.AlphabetIndex(ctx) if err != nil { - s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err)) return -1 } return int(index) } -func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error { +func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error { validators := prm.Validators - index := s.InnerRingIndex() + index := s.InnerRingIndex(ctx) if s.contracts.alphabet.indexOutOfRange(index) { - s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) + s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) return nil } if len(validators) == 0 { - s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) + s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) return nil } @@ -126,12 +127,12 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro } s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) { - _, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) + _, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) if err != nil { - s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract, + s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) } }) @@ -140,9 +141,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro // VoteForSidechainValidator calls vote method on alphabet contracts with // the provided list of keys. -func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error { +func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error { sort.Sort(prm.Validators) - return s.voteForSidechainValidator(prm) + return s.voteForSidechainValidator(ctx, prm) } // ResetEpochTimer resets the block timer that produces events to update epoch @@ -153,14 +154,24 @@ func (s *Server) ResetEpochTimer(h uint32) error { return s.epochTimer.Reset() } -func (s *Server) setHealthStatus(hs control.HealthStatus) { +func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) { s.healthStatus.Store(int32(hs)) - s.notifySystemd(hs) + s.notifySystemd(ctx, hs) if s.irMetrics != nil { s.irMetrics.SetHealth(int32(hs)) } } +func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) { + if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped { + s.notifySystemd(ctx, newSt) + if s.irMetrics != nil { + s.irMetrics.SetHealth(int32(newSt)) + } + } + return +} + // HealthStatus returns the current health status of the IR application. 
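// --- Sketch of the CompareAndSwapHealthStatus semantics added above, reduced
// to the sync/atomic primitive it relies on; the real method also notifies
// systemd and updates IR metrics. Status values here are placeholders.
package example

import "sync/atomic"

const (
	statusReady         int32 = 1
	statusReconfiguring int32 = 2
)

type Server struct{ health atomic.Int32 }

// tryReconfigure succeeds only if the status is still READY, so a concurrent
// shutdown (or another reload) cannot be overwritten by this transition.
func (s *Server) tryReconfigure() bool {
	return s.health.CompareAndSwap(statusReady, statusReconfiguring)
}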
func (s *Server) HealthStatus() control.HealthStatus { return control.HealthStatus(s.healthStatus.Load()) @@ -176,7 +187,7 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err return persistStorage, nil } -func (s *Server) notifySystemd(st control.HealthStatus) { +func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) { if !s.sdNotify { return } @@ -186,10 +197,12 @@ func (s *Server) notifySystemd(st control.HealthStatus) { err = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled) case control.HealthStatus_SHUTTING_DOWN: err = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled) + case control.HealthStatus_RECONFIGURING: + err = sdnotify.FlagAndStatus(sdnotify.ReloadingEnabled) default: err = sdnotify.Status(fmt.Sprintf("%v", st)) } if err != nil { - s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err)) + s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go index fe09f8f2d..f60ca87c4 100644 --- a/pkg/innerring/state_test.go +++ b/pkg/innerring/state_test.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "testing" "time" @@ -42,12 +43,12 @@ func TestServerState(t *testing.T) { require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration") var healthStatus control.HealthStatus = control.HealthStatus_READY - srv.setHealthStatus(healthStatus) + srv.setHealthStatus(context.Background(), healthStatus) require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status") - require.True(t, srv.IsActive(), "invalid IsActive result") - require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result") - require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index") - require.Equal(t, 1, srv.InnerRingSize(), "invalid IR index") - require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index") + require.True(t, srv.IsActive(context.Background()), "invalid IsActive result") + require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result") + require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index") + require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index") + require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index") } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go index c787f9d5e..a6c40f9fa 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go @@ -58,7 +58,7 @@ func defaultCfg(c *cfg) { }, fullSizeLimit: 1 << 30, // 1GB objSizeLimit: 1 << 20, // 1MB - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), metrics: &NoopMetrics{}, } } @@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option { // WithLogger returns an option to specify Blobovnicza's logger. 
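// --- Minimal model of the ctx-aware logger wrapper this patch migrates to
// (the real constructor, logger.NewLoggerWrapper, lives in pkg/util/logger;
// its internals are assumed here). Deriving trace_id from ctx inside the
// wrapper is what lets call sites drop their explicit trace_id fields.
package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

type Logger struct{ z *zap.Logger }

func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{z: z} }

func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	// Attach the OpenTelemetry trace ID, if the context carries one.
	if sc := trace.SpanContextFromContext(ctx); sc.HasTraceID() {
		fields = append(fields, zap.Stringer("trace_id", sc.TraceID()))
	}
	l.z.Info(msg, fields...)
}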
func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))} + c.log = l } } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go index caee770e8..95fdd844b 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go @@ -69,10 +69,10 @@ func TestBlobovnicza(t *testing.T) { defer os.Remove(p) // open Blobovnicza - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) // initialize Blobovnicza - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) // try to read non-existent address testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound) @@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) { return err == nil }, nil) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index aeaa4e1d5..4947512cc 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -1,6 +1,7 @@ package blobovnicza import ( + "context" "errors" "fmt" "path/filepath" @@ -15,7 +16,7 @@ import ( // // If the database file does not exist, it will be created automatically. // If blobovnicza is already open, does nothing. -func (b *Blobovnicza) Open() error { +func (b *Blobovnicza) Open(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -23,7 +24,7 @@ func (b *Blobovnicza) Open() error { return nil } - b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB, + b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB, zap.String("path", b.path), zap.Bool("ro", b.boltOptions.ReadOnly), ) @@ -37,7 +38,7 @@ func (b *Blobovnicza) Open() error { } } - b.log.Debug(logs.BlobovniczaOpeningBoltDB, + b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB, zap.String("path", b.path), zap.Stringer("permissions", b.perm), ) @@ -55,7 +56,7 @@ func (b *Blobovnicza) Open() error { // // If Blobovnicza is already initialized, no action is taken. // Blobovnicza must be open, otherwise an error will return. 
-func (b *Blobovnicza) Init() error { +func (b *Blobovnicza) Init(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -63,7 +64,7 @@ func (b *Blobovnicza) Init() error { return errors.New("blobovnicza is not open") } - b.log.Debug(logs.BlobovniczaInitializing, + b.log.Debug(ctx, logs.BlobovniczaInitializing, zap.Uint64("object size limit", b.objSizeLimit), zap.Uint64("storage size limit", b.fullSizeLimit), ) @@ -71,7 +72,7 @@ func (b *Blobovnicza) Init() error { size := b.dataSize.Load() items := b.itemsCount.Load() if size != 0 || items != 0 { - b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) return nil } @@ -81,7 +82,7 @@ func (b *Blobovnicza) Init() error { // create size range bucket rangeStr := stringifyBounds(lower, upper) - b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange, + b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange, zap.String("range", rangeStr)) _, err := tx.CreateBucketIfNotExists(key) @@ -98,14 +99,14 @@ func (b *Blobovnicza) Init() error { } } - return b.initializeCounters() + return b.initializeCounters(ctx) } func (b *Blobovnicza) ObjectsCount() uint64 { return b.itemsCount.Load() } -func (b *Blobovnicza) initializeCounters() error { +func (b *Blobovnicza) initializeCounters(ctx context.Context) error { var size uint64 var items uint64 var sizeExists bool @@ -128,20 +129,20 @@ func (b *Blobovnicza) initializeCounters() error { }) }) if err != nil { - return fmt.Errorf("can't determine DB size: %w", err) + return fmt.Errorf("determine DB size: %w", err) } if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly { - b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) if err := b.boltDB.Update(func(tx *bbolt.Tx) error { if err := saveDataSize(tx, size); err != nil { return err } return saveItemsCount(tx, items) }); err != nil { - b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) - return fmt.Errorf("can't save blobovnicza's size and items count: %w", err) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) + return fmt.Errorf("save blobovnicza's size and items count: %w", err) } - b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) } b.dataSize.Store(size) @@ -154,7 +155,7 @@ func (b *Blobovnicza) initializeCounters() error { // Close releases all internal database resources. // // If blobovnicza is already closed, does nothing. 
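// --- Usage sketch for the updated lifecycle API: Open, Init and Close now
// take a context. This mirrors sharedDB.Open further down in this patch;
// only the error-message wording here is mine.
package example

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
)

func openBlobovnicza(ctx context.Context, path string) (*blobovnicza.Blobovnicza, error) {
	blz := blobovnicza.New(blobovnicza.WithPath(path))
	if err := blz.Open(ctx); err != nil {
		return nil, fmt.Errorf("open blobovnicza %s: %w", path, err)
	}
	if err := blz.Init(ctx); err != nil {
		_ = blz.Close(ctx) // best effort: Init failed, release the handle
		return nil, fmt.Errorf("init blobovnicza %s: %w", path, err)
	}
	return blz, nil
}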
-func (b *Blobovnicza) Close() error { +func (b *Blobovnicza) Close(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -162,7 +163,7 @@ func (b *Blobovnicza) Close() error { return nil } - b.log.Debug(logs.BlobovniczaClosingBoltDB, + b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB, zap.String("path", b.path), ) diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go index 5d6787897..8f24b5675 100644 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ b/pkg/local_object_storage/blobovnicza/delete.go @@ -6,7 +6,6 @@ import ( "syscall" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -91,10 +90,9 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err } if err == nil && found { - b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket, + b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket, zap.String("binary size", stringifyByteSize(dataSize)), zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) b.itemDeleted(recordSize) } diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go index c464abc87..5a382c159 100644 --- a/pkg/local_object_storage/blobovnicza/get_test.go +++ b/pkg/local_object_storage/blobovnicza/get_test.go @@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) { filename := filepath.Join(t.TempDir(), "blob") var blz *Blobovnicza - defer func() { require.NoError(t, blz.Close()) }() + defer func() { require.NoError(t, blz.Close(context.Background())) }() fnInit := func(szLimit uint64) { if blz != nil { - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } blz = New( @@ -26,8 +26,8 @@ func TestBlobovnicza_Get(t *testing.T) { WithObjectSizeLimit(szLimit), ) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) } // initial distribution: [0:32K] (32K:64K] diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go index 01e5529da..cd33b263c 100644 --- a/pkg/local_object_storage/blobovnicza/iterate.go +++ b/pkg/local_object_storage/blobovnicza/iterate.go @@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes, if prm.ignoreErrors { return nil } - return fmt.Errorf("could not decode address key: %w", err) + return fmt.Errorf("decode address key: %w", err) } } diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go index 5db1e4165..717274781 100644 --- a/pkg/local_object_storage/blobovnicza/iterate_test.go +++ b/pkg/local_object_storage/blobovnicza/iterate_test.go @@ -15,8 +15,8 @@ import ( func TestBlobovniczaIterate(t *testing.T) { filename := filepath.Join(t.TempDir(), "blob") b := New(WithPath(filename)) - require.NoError(t, b.Open()) - require.NoError(t, b.Init()) + require.NoError(t, b.Open(context.Background())) + require.NoError(t, b.Init(context.Background())) data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}} addr := oidtest.Address() diff --git 
a/pkg/local_object_storage/blobovnicza/sizes.go b/pkg/local_object_storage/blobovnicza/sizes.go index 1dff75aed..9bbed0db5 100644 --- a/pkg/local_object_storage/blobovnicza/sizes.go +++ b/pkg/local_object_storage/blobovnicza/sizes.go @@ -57,3 +57,7 @@ func (b *Blobovnicza) itemDeleted(itemSize uint64) { func (b *Blobovnicza) IsFull() bool { return b.dataSize.Load() >= b.fullSizeLimit } + +func (b *Blobovnicza) FillPercent() int { + return int(100.0 * (float64(b.dataSize.Load()) / float64(b.fullSizeLimit))) +} diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go index 01093b8d7..d582fc5e4 100644 --- a/pkg/local_object_storage/blobovnicza/sizes_test.go +++ b/pkg/local_object_storage/blobovnicza/sizes_test.go @@ -42,7 +42,7 @@ func TestSizes(t *testing.T) { func BenchmarkUpperBound(b *testing.B) { for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} { b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { _ = upperPowerOfTwo(size) } }) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go index 603c6abe3..dbaa7387a 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go @@ -1,6 +1,7 @@ package blobovniczatree import ( + "context" "path/filepath" "sync" @@ -17,8 +18,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza { return db.blz } -func (db *activeDB) Close() { - db.shDB.Close() +func (db *activeDB) Close(ctx context.Context) { + db.shDB.Close(ctx) } func (db *activeDB) SystemPath() string { @@ -53,8 +54,8 @@ func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager // GetOpenedActiveDBForLevel returns active DB for level. // DB must be closed after use. 
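// --- FillPercent (added above) in use: a hypothetical rebuild predicate.
// Databases whose fill ratio falls below a caller-chosen threshold become
// candidates for merging their payload into fuller ones.
package example

import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"

func isRebuildCandidate(blz *blobovnicza.Blobovnicza, thresholdPercent int) bool {
	return blz.FillPercent() < thresholdPercent
}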
-func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) { - activeDB, err := m.getCurrentActiveIfOk(lvlPath) +func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) { + activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath) if err != nil { return nil, err } @@ -62,7 +63,7 @@ func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, return activeDB, nil } - return m.updateAndGetActive(lvlPath) + return m.updateAndGetActive(ctx, lvlPath) } func (m *activeDBManager) Open() { @@ -72,18 +73,18 @@ func (m *activeDBManager) Open() { m.closed = false } -func (m *activeDBManager) Close() { +func (m *activeDBManager) Close(ctx context.Context) { m.levelToActiveDBGuard.Lock() defer m.levelToActiveDBGuard.Unlock() for _, db := range m.levelToActiveDB { - db.Close() + db.Close(ctx) } m.levelToActiveDB = make(map[string]*sharedDB) m.closed = true } -func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) { +func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) { m.levelToActiveDBGuard.RLock() defer m.levelToActiveDBGuard.RUnlock() @@ -96,13 +97,13 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error return nil, nil } - blz, err := db.Open() // open db for usage, will be closed on activeDB.Close() + blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close() if err != nil { return nil, err } if blz.IsFull() { - db.Close() + db.Close(ctx) return nil, nil } @@ -112,11 +113,11 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error }, nil } -func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) { +func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) { m.levelLock.Lock(lvlPath) defer m.levelLock.Unlock(lvlPath) - current, err := m.getCurrentActiveIfOk(lvlPath) + current, err := m.getCurrentActiveIfOk(ctx, lvlPath) if err != nil { return nil, err } @@ -124,7 +125,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) return current, nil } - nextShDB, err := m.getNextSharedDB(lvlPath) + nextShDB, err := m.getNextSharedDB(ctx, lvlPath) if err != nil { return nil, err } @@ -133,7 +134,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) return nil, nil } - blz, err := nextShDB.Open() // open db for client, client must call Close() after usage + blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage if err != nil { return nil, err } @@ -143,7 +144,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) }, nil } -func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) { +func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) { var nextActiveDBIdx uint64 hasActive, currentIdx := m.hasActiveDB(lvlPath) if hasActive { @@ -160,17 +161,17 @@ func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) { path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx)) next := m.dbManager.GetByPath(path) - _, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close() + _, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close() if err != nil 
{ return nil, err } previous, updated := m.replace(lvlPath, next) if !updated && next != nil { - next.Close() // manager is closed, so don't hold active DB open + next.Close(ctx) // manager is closed, so don't hold active DB open } if updated && previous != nil { - previous.Close() + previous.Close(ctx) } return next, nil } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index 952203367..3e8b9f07b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -135,7 +135,7 @@ func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) { var hasDBs bool var maxIdx uint64 for _, e := range entries { - if e.IsDir() { + if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) { continue } hasDBs = true @@ -158,16 +158,16 @@ func (b *Blobovniczas) Path() string { } // SetCompressor implements common.Storage. -func (b *Blobovniczas) SetCompressor(cc *compression.Config) { +func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) { b.compression = cc } -func (b *Blobovniczas) Compressor() *compression.Config { +func (b *Blobovniczas) Compressor() *compression.Compressor { return b.compression } // SetReportErrorFunc implements common.Storage. -func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) { +func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) { b.reportError = f } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go index 5c103c1bb..04ff5120c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go @@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int, ch := cache.NewCache[string, *sharedDB](). WithTTL(ttl).WithLRU().WithMaxKeys(size). 
WithOnEvicted(func(_ string, db *sharedDB) { - db.Close() + db.Close(parentCtx) }) ctx, cancel := context.WithCancel(parentCtx) res := &dbCache{ @@ -81,12 +81,12 @@ func (c *dbCache) Close() { c.closed = true } -func (c *dbCache) GetOrCreate(path string) *sharedDB { +func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB { value := c.getExisted(path) if value != nil { return value } - return c.create(path) + return c.create(ctx, path) } func (c *dbCache) EvictAndMarkNonCached(path string) { @@ -122,7 +122,7 @@ func (c *dbCache) getExisted(path string) *sharedDB { return nil } -func (c *dbCache) create(path string) *sharedDB { +func (c *dbCache) create(ctx context.Context, path string) *sharedDB { c.pathLock.Lock(path) defer c.pathLock.Unlock(path) @@ -133,12 +133,12 @@ func (c *dbCache) create(path string) *sharedDB { value = c.dbManager.GetByPath(path) - _, err := value.Open() // open db to hold reference, closed by evictedDB.Close() or if cache closed + _, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed if err != nil { return value } if added := c.put(path, value); !added { - value.Close() + value.Close(ctx) } return value } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index 5bed86142..f87f4a144 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -19,7 +19,8 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { st := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(10), WithBlobovniczaShallowDepth(1), @@ -27,14 +28,14 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { require.NoError(t, st.Open(mode.ComponentReadWrite)) require.NoError(t, st.Init()) defer func() { - require.NoError(t, st.Close()) + require.NoError(t, st.Close(context.Background())) }() objGen := &testutil.SeqObjGenerator{ObjSize: 1} var cnt atomic.Int64 var wg sync.WaitGroup - for i := 0; i < 1000; i++ { + for range 1000 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index a31e9d6cb..a6c1ce368 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -2,6 +2,7 @@ package blobovniczatree import ( "context" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -23,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error { // // Should be called exactly once. 
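// --- The loops rewritten above use Go 1.22's range-over-int form, available
// after this repo's toolchain bump; when the index is unused, `for range n`
// is equivalent to the classic counter loop.
package example

import "testing"

func BenchmarkNoop(b *testing.B) {
	for range b.N { // same iteration count as: for i := 0; i < b.N; i++
		_ = struct{}{}
	}
}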
func (b *Blobovniczas) Init() error { - b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas) + b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas) if b.readOnly { - b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization) + b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization) return nil } @@ -40,36 +41,34 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - eg.SetLimit(b.blzInitWorkerCount) - visited := make(map[string]struct{}) - err = b.iterateExistingDBPaths(egCtx, func(p string) (bool, error) { - visited[p] = struct{}{} - eg.Go(func() error { - shBlz := b.getBlobovniczaWithoutCaching(p) - blz, err := shBlz.Open() - if err != nil { - return err - } - defer shBlz.Close() - - moveInfo, err := blz.ListMoveInfo(egCtx) - if err != nil { - return err - } - for _, move := range moveInfo { - b.deleteProtectedObjects.Add(move.Address) - } - - b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) - return nil - }) - return false, nil - }) - if err != nil { - _ = eg.Wait() - return err + if b.blzInitWorkerCount > 0 { + eg.SetLimit(b.blzInitWorkerCount + 1) } + eg.Go(func() error { + return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { + eg.Go(func() error { + p = strings.TrimSuffix(p, rebuildSuffix) + shBlz := b.getBlobovniczaWithoutCaching(p) + blz, err := shBlz.Open(egCtx) + if err != nil { + return err + } + defer shBlz.Close(egCtx) + moveInfo, err := blz.ListMoveInfo(egCtx) + if err != nil { + return err + } + for _, move := range moveInfo { + b.deleteProtectedObjects.Add(move.Address) + } + + b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) + return nil + }) + return false, nil + }) + }) return eg.Wait() } @@ -80,9 +79,9 @@ func (b *Blobovniczas) openManagers() { } // Close implements common.Storage. -func (b *Blobovniczas) Close() error { +func (b *Blobovniczas) Close(ctx context.Context) error { b.dbCache.Close() // order important - b.activeDBManager.Close() + b.activeDBManager.Close(ctx) b.commondbManager.Close() return nil @@ -91,8 +90,8 @@ func (b *Blobovniczas) Close() error { // returns blobovnicza with path p // // If blobovnicza is already cached, instance from cache is returned w/o changes. 
-func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB { - return b.dbCache.GetOrCreate(p) +func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB { + return b.dbCache.GetOrCreate(ctx, p) } func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB { diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go index f0a32ded1..7db1891f9 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go @@ -2,6 +2,9 @@ package blobovniczatree import ( "context" + "os" + "path" + "strconv" "testing" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -51,7 +54,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, err) require.EqualValues(t, obj35, gRes.Object) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) // change depth and width blz = NewBlobovniczaTree( @@ -89,7 +92,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { }) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) // change depth and width back blz = NewBlobovniczaTree( @@ -127,5 +130,36 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, err) require.EqualValues(t, obj52, gRes.Object) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) +} + +func TestInitBlobovniczasInitErrorType(t *testing.T) { + t.Parallel() + + rootDir := t.TempDir() + + for idx := 0; idx < 10; idx++ { + f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db")) + require.NoError(t, err) + _, err = f.Write([]byte("invalid db")) + require.NoError(t, err) + require.NoError(t, f.Close()) + + f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix)) + require.NoError(t, err) + require.NoError(t, f.Close()) + } + + blz := NewBlobovniczaTree( + context.Background(), + WithBlobovniczaShallowDepth(1), + WithBlobovniczaShallowWidth(1), + WithRootPath(rootDir), + ) + + require.NoError(t, blz.Open(mode.ComponentReadWrite)) + err := blz.Init() + require.Contains(t, err.Error(), "open blobovnicza") + require.Contains(t, err.Error(), "invalid database") + require.NoError(t, blz.Close(context.Background())) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go index cf91637d7..b83849c77 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/count.go @@ -16,17 +16,17 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) { b.metrics.ObjectsCount(time.Since(startedAt), success) }() - _, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount") + ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount") defer span.End() var result uint64 err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) { shDB := b.getBlobovniczaWithoutCaching(p) - blz, err := shDB.Open() + blz, err := shDB.Open(ctx) if err != nil { return true, err } - defer shDB.Close() + defer shDB.Close(ctx) result += blz.ObjectsCount() return false, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 
298de3ad6..d096791c3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -19,7 +18,10 @@ import ( "go.uber.org/zap" ) -var errObjectIsDeleteProtected = errors.New("object is delete protected") +var ( + errObjectIsDeleteProtected = errors.New("object is delete protected") + deleteRes = common.DeleteRes{} +) // Delete deletes object from blobovnicza tree. // @@ -43,17 +45,17 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co defer span.End() if b.readOnly { - return common.DeleteRes{}, common.ErrReadOnly + return deleteRes, common.ErrReadOnly } if b.rebuildGuard.TryRLock() { defer b.rebuildGuard.RUnlock() } else { - return common.DeleteRes{}, errRebuildInProgress + return deleteRes, errRebuildInProgress } if b.deleteProtectedObjects.Contains(prm.Address) { - return common.DeleteRes{}, errObjectIsDeleteProtected + return deleteRes, errObjectIsDeleteProtected } var bPrm blobovnicza.DeletePrm @@ -61,12 +63,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return res, err } - defer shBlz.Close() + defer shBlz.Close(ctx) if res, err = b.deleteObject(ctx, blz, bPrm); err == nil { success = true @@ -80,10 +82,9 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co res, err = b.deleteObjectFromLevel(ctx, bPrm, p) if err != nil { if !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, + b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), + zap.Error(err), ) } } @@ -98,7 +99,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if err == nil && !objectFound { // not found in any blobovnicza - return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound)) } success = err == nil @@ -109,12 +110,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co // // returns no error if object was removed from some blobovnicza of the same level. 
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) { - shBlz := b.getBlobovnicza(blzPath) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, blzPath) + blz, err := shBlz.Open(ctx) if err != nil { - return common.DeleteRes{}, err + return deleteRes, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.deleteObject(ctx, blz, prm) } @@ -122,5 +123,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz // removes object from blobovnicza and returns common.DeleteRes. func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) { _, err := blz.Delete(ctx, prm) - return common.DeleteRes{}, err + return deleteRes, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index a64b2bbb1..0c5e48821 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "go.opentelemetry.io/otel/attribute" @@ -37,12 +36,12 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return common.ExistsRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) exists, err := blz.Exists(ctx, prm.Address) return common.ExistsRes{Exists: exists}, err @@ -55,10 +54,9 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common _, err := b.getObjectFromLevel(ctx, gPrm, p) if err != nil { if !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, + b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index d6ffd8bce..df2b4ffe5 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go @@ -19,7 +19,8 @@ func TestExistsInvalidStorageID(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), @@ -27,7 +28,7 @@ func TestExistsInvalidStorageID(t *testing.T) { WithBlobovniczaSize(1<<20)) require.NoError(t, b.Open(mode.ComponentReadWrite)) require.NoError(t, b.Init()) - defer func() { require.NoError(t, b.Close()) }() + defer func() { require.NoError(t, b.Close(context.Background())) }() obj := 
blobstortest.NewObject(1024) addr := object.AddressOf(obj) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go index d390ecf1d..9244d765c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go @@ -15,7 +15,8 @@ func TestGeneric(t *testing.T) { helper := func(t *testing.T, dir string) common.Storage { return NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), @@ -43,7 +44,8 @@ func TestControl(t *testing.T) { newTree := func(t *testing.T) common.Storage { return NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index 08cacda8a..e5c83e5f2 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -48,12 +47,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return res, err } - defer shBlz.Close() + defer shBlz.Close(ctx) res, err = b.getObject(ctx, blz, bPrm) if err == nil { @@ -67,10 +66,9 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G res, err = b.getObjectFromLevel(ctx, bPrm, p) if err != nil { if !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, + b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), + zap.Error(err), ) } } @@ -95,12 +93,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G // returns error if object could not be read from any blobovnicza of the same level. 
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) { // open blobovnicza (cached inside) - shBlz := b.getBlobovnicza(blzPath) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, blzPath) + blz, err := shBlz.Open(ctx) if err != nil { return common.GetRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.getObject(ctx, blz, prm) } @@ -115,13 +113,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic // decompress the data data, err := b.compression.Decompress(res.Object()) if err != nil { - return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRes{}, fmt.Errorf("decompress object data: %w", err) } // unmarshal the object obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err) } return common.GetRes{Object: obj, RawData: data}, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index d237ae439..27d13f4f3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -11,7 +11,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -47,12 +46,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return common.GetRangeRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) res, err := b.getObjectRange(ctx, blz, prm) if err == nil { @@ -69,10 +68,9 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if err != nil { outOfBounds := isErrOutOfRange(err) if !outOfBounds && !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, + b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } if outOfBounds { return true, err @@ -103,12 +101,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re // returns error if object could not be read from any blobovnicza of the same level. 
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) { // open blobovnicza (cached inside) - shBlz := b.getBlobovnicza(blzPath) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, blzPath) + blz, err := shBlz.Open(ctx) if err != nil { return common.GetRangeRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.getObjectRange(ctx, blz, prm) } @@ -130,13 +128,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob // decompress the data data, err := b.compression.Decompress(res.Object()) if err != nil { - return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err) } // unmarshal the object obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err) } from := prm.Range.GetOffset() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index af3d9e720..ceb8fb7e3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -42,14 +42,14 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm data, err := b.compression.Decompress(elem.ObjectData()) if err != nil { if prm.IgnoreErrors { - b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", elem.Address()), - zap.String("err", err.Error()), + zap.Error(err), zap.String("storage_id", p), zap.String("root_path", b.rootPath)) return nil } - return fmt.Errorf("could not decompress object data: %w", err) + return fmt.Errorf("decompress object data: %w", err) } if prm.Handler != nil { @@ -72,19 +72,19 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm // iterator over all Blobovniczas in unsorted order. Break on f's error return. 
 func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
 	return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
-		shBlz := b.getBlobovnicza(p)
-		blz, err := shBlz.Open()
+		shBlz := b.getBlobovnicza(ctx, p)
+		blz, err := shBlz.Open(ctx)
 		if err != nil {
 			if ignoreErrors {
-				b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
-					zap.String("err", err.Error()),
+				b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+					zap.Error(err),
 					zap.String("storage_id", p),
 					zap.String("root_path", b.rootPath))
 				return false, nil
 			}
-			return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
+			return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
 		}
-		defer shBlz.Close()
+		defer shBlz.Close(ctx)
 
 		err = f(p, blz)
@@ -188,11 +188,11 @@ func (b *Blobovniczas) iterateExistingDBPaths(ctx context.Context, f func(string
 	b.dbFilesGuard.RLock()
 	defer b.dbFilesGuard.RUnlock()
 
-	_, err := b.iterateExistingDBPathsDFS(ctx, "", f)
+	_, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return !strings.HasSuffix(path, rebuildSuffix) })
 	return err
 }
 
-func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path string, f func(string) (bool, error)) (bool, error) {
+func (b *Blobovniczas) iterateExistingPathsDFS(ctx context.Context, path string, f func(string) (bool, error), fileFilter func(path string) bool) (bool, error) {
 	sysPath := filepath.Join(b.rootPath, path)
 	entries, err := os.ReadDir(sysPath)
 	if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
@@ -208,7 +208,7 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
 		default:
 		}
 		if entry.IsDir() {
-			stop, err := b.iterateExistingDBPathsDFS(ctx, filepath.Join(path, entry.Name()), f)
+			stop, err := b.iterateExistingPathsDFS(ctx, filepath.Join(path, entry.Name()), f, fileFilter)
 			if err != nil {
 				return false, err
 			}
@@ -216,6 +216,9 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
 				return true, nil
 			}
 		} else {
+			if !fileFilter(entry.Name()) {
+				continue
+			}
 			stop, err := f(filepath.Join(path, entry.Name()))
 			if err != nil {
 				return false, err
@@ -228,6 +231,15 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
 	return false, nil
 }
 
+// iterateIncompletedRebuildDBPaths iterates over the paths of Blobovniczas with incomplete rebuild files, in no particular order.
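// --- Standalone sketch of the suffix filtering introduced above: one DFS
// walker serves two scans — regular DB discovery skips files carrying the
// rebuild marker, while the incomplete-rebuild scan keeps only those.
package example

import (
	"io/fs"
	"path/filepath"
	"strings"
)

const rebuildSuffix = ".rebuild" // assumption: placeholder; the real constant is defined elsewhere in this package

func walkDBs(root string, wantRebuildMarkers bool, visit func(path string) error) error {
	return filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		if strings.HasSuffix(p, rebuildSuffix) != wantRebuildMarkers {
			return nil // filtered out, same idea as the fileFilter callback
		}
		return visit(p)
	})
}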
+func (b *Blobovniczas) iterateIncompletedRebuildDBPaths(ctx context.Context, f func(string) (bool, error)) error { + b.dbFilesGuard.RLock() + defer b.dbFilesGuard.RUnlock() + + _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return strings.HasSuffix(path, rebuildSuffix) }) + return err +} + func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error { b.dbFilesGuard.RLock() defer b.dbFilesGuard.RUnlock() @@ -237,6 +249,12 @@ func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Addres } func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) { + select { + case <-ctx.Done(): + return false, ctx.Err() + default: + } + sysPath := filepath.Join(b.rootPath, path) entries, err := os.ReadDir(sysPath) if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode @@ -249,6 +267,9 @@ func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path st var dirIdxs []uint64 for _, entry := range entries { + if strings.HasSuffix(entry.Name(), rebuildSuffix) { + continue + } idx := u64FromHexString(entry.Name()) if entry.IsDir() { dirIdxs = append(dirIdxs, idx) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 4fdde15a9..6438f715b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -1,6 +1,7 @@ package blobovniczatree import ( + "context" "errors" "fmt" "os" @@ -48,7 +49,7 @@ func newSharedDB(options []blobovnicza.Option, path string, readOnly bool, } } -func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) { +func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { if b.closedFlag.Load() { return nil, errClosed } @@ -67,11 +68,11 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) { blobovnicza.WithMetrics(b.metrics), )...) 
- if err := blz.Open(); err != nil { - return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err) + if err := blz.Open(ctx); err != nil { + return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err) } - if err := blz.Init(); err != nil { - return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err) + if err := blz.Init(ctx); err != nil { + return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err) } b.refCount++ @@ -81,22 +82,22 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) { return blz, nil } -func (b *sharedDB) Close() { +func (b *sharedDB) Close(ctx context.Context) { b.cond.L.Lock() defer b.cond.L.Unlock() if b.refCount == 0 { - b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) + b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) b.cond.Broadcast() return } if b.refCount == 1 { b.refCount = 0 - if err := b.blcza.Close(); err != nil { - b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza, + if err := b.blcza.Close(ctx); err != nil { + b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), - zap.String("error", err.Error()), + zap.Error(err), ) } b.blcza = nil @@ -110,7 +111,7 @@ func (b *sharedDB) Close() { } } -func (b *sharedDB) CloseAndRemoveFile() error { +func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error { b.cond.L.Lock() if b.refCount > 1 { b.cond.Wait() @@ -121,12 +122,12 @@ func (b *sharedDB) CloseAndRemoveFile() error { return errClosingClosedBlobovnicza } - if err := b.blcza.Close(); err != nil { - b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza, + if err := b.blcza.Close(ctx); err != nil { + b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), - zap.String("error", err.Error()), + zap.Error(err), ) - return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err) + return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err) } b.refCount = 0 @@ -140,8 +141,8 @@ func (b *sharedDB) SystemPath() string { return b.path } -// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. -type levelDbManager struct { +// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. 
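// --- Minimal model of the sharedDB reference counting shown above: the last
// Close releases the underlying handle, and closing an already-closed
// instance is tolerated (the real code logs it instead of failing).
package example

import "sync"

type refCounted struct {
	mu      sync.Mutex
	count   int
	closeFn func() error
}

func (r *refCounted) acquire() {
	r.mu.Lock()
	r.count++
	r.mu.Unlock()
}

func (r *refCounted) release() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	switch {
	case r.count == 0:
		return nil // already closed
	case r.count == 1:
		r.count = 0
		if r.closeFn != nil {
			return r.closeFn() // last user: release the underlying handle
		}
		return nil
	default:
		r.count--
		return nil
	}
}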
+type levelDBManager struct { dbMtx *sync.RWMutex databases map[uint64]*sharedDB @@ -156,8 +157,8 @@ type levelDbManager struct { func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string, readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger, -) *levelDbManager { - result := &levelDbManager{ +) *levelDBManager { + result := &levelDBManager{ databases: make(map[uint64]*sharedDB), dbMtx: &sync.RWMutex{}, @@ -172,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st return result } -func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { +func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { res := m.getDBIfExists(idx) if res != nil { return res @@ -180,14 +181,14 @@ func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { return m.getOrCreateDB(idx) } -func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB { +func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB { m.dbMtx.RLock() defer m.dbMtx.RUnlock() return m.databases[idx] } -func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { +func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { m.dbMtx.Lock() defer m.dbMtx.Unlock() @@ -201,7 +202,7 @@ func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { return db } -func (m *levelDbManager) hasAnyDB() bool { +func (m *levelDBManager) hasAnyDB() bool { m.dbMtx.RLock() defer m.dbMtx.RUnlock() @@ -212,7 +213,7 @@ func (m *levelDbManager) hasAnyDB() bool { // // The blobovnicza opens at the first request, closes after the last request. type dbManager struct { - levelToManager map[string]*levelDbManager + levelToManager map[string]*levelDBManager levelToManagerGuard *sync.RWMutex closedFlag *atomic.Bool dbCounter *openDBCounter @@ -230,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, options: options, readOnly: readOnly, metrics: metrics, - levelToManager: make(map[string]*levelDbManager), + levelToManager: make(map[string]*levelDBManager), levelToManagerGuard: &sync.RWMutex{}, log: log, closedFlag: &atomic.Bool{}, @@ -265,7 +266,7 @@ func (m *dbManager) Close() { m.dbCounter.WaitUntilAllClosed() } -func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { +func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { result := m.getLevelManagerIfExists(lvlPath) if result != nil { return result @@ -273,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { return m.getOrCreateLevelManager(lvlPath) } -func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager { +func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager { m.levelToManagerGuard.RLock() defer m.levelToManagerGuard.RUnlock() return m.levelToManager[lvlPath] } -func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager { +func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager { m.levelToManagerGuard.Lock() defer m.levelToManagerGuard.Unlock() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index 008be9543..5f268b0f2 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -1,6 +1,7 @@ package blobovniczatree import ( + "context" "io/fs" "time" @@ -18,9 +19,9 @@ type cfg struct { openedCacheSize int blzShallowDepth uint64 
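// Besides the levelDbManager -> levelDBManager rename, GetByIndex keeps the
// read-mostly access pattern: an RLock fast path, then a write-locked slow
// path that creates the entry once. A generic sketch of the idiom; the
// re-check under the write lock is an assumption about the elided body.
package main

import (
	"fmt"
	"sync"
)

type lazyMap[K comparable, V any] struct {
	mu     sync.RWMutex
	m      map[K]V
	create func(K) V
}

func (c *lazyMap[K, V]) get(k K) V {
	c.mu.RLock()
	v, ok := c.m[k]
	c.mu.RUnlock()
	if ok {
		return v // fast path: shared lock only
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.m[k]; ok {
		return v // another goroutine created it between the two locks
	}
	v = c.create(k)
	c.m[k] = v
	return v
}

func main() {
	c := lazyMap[uint64, string]{
		m:      map[uint64]string{},
		create: func(k uint64) string { return fmt.Sprintf("db-%x", k) },
	}
	fmt.Println(c.get(1), c.get(1)) // created once, returned twice
}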
blzShallowWidth uint64 - compression *compression.Config + compression *compression.Compressor blzOpts []blobovnicza.Option - reportError func(string, error) // reportError is the function called when encountering disk errors. + reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors. metrics Metrics waitBeforeDropDB time.Duration blzInitWorkerCount int @@ -47,14 +48,14 @@ const ( func initConfig(c *cfg) { *c = cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), perm: defaultPerm, openedCacheSize: defaultOpenedCacheSize, openedCacheTTL: defaultOpenedCacheTTL, openedCacheExpInterval: defaultOpenedCacheInterval, blzShallowDepth: defaultBlzShallowDepth, blzShallowWidth: defaultBlzShallowWidth, - reportError: func(string, error) {}, + reportError: func(context.Context, string, error) {}, metrics: &noopMetrics{}, waitBeforeDropDB: defaultWaitBeforeDropDB, blzInitWorkerCount: defaultBlzInitWorkerCount, @@ -62,10 +63,15 @@ func initConfig(c *cfg) { } } -func WithLogger(l *logger.Logger) Option { +func WithBlobovniczaTreeLogger(log *logger.Logger) Option { return func(c *cfg) { - c.log = l - c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l)) + c.log = log + } +} + +func WithBlobovniczaLogger(log *logger.Logger) Option { + return func(c *cfg) { + c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log)) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 76c4953e4..37c49d741 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -77,37 +76,34 @@ type putIterator struct { } func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) { - active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath) + active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { - i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, + zap.Error(err)) } return false, nil } if active == nil { - i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) return false, nil } - defer active.Close() + defer active.Close(ctx) i.AllFull = false _, err = active.Blobovnicza().Put(ctx, i.PutPrm) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { - 
i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, + i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", active.SystemPath()), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } if errors.Is(err, blobovnicza.ErrNoSpace) { i.AllFull = true diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index 93ef8ba2e..a840275b8 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -4,8 +4,10 @@ import ( "bytes" "context" "errors" + "fmt" "os" "path/filepath" + "strings" "sync" "sync/atomic" "time" @@ -19,6 +21,8 @@ import ( "golang.org/x/sync/errgroup" ) +const rebuildSuffix = ".rebuild" + var ( errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed") errBatchFull = errors.New("batch full") @@ -45,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm var res common.RebuildRes - b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild) - completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage) + b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild) + completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter) res.ObjectsMoved += completedPreviosMoves if err != nil { - b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) success = false return res, err } - b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess) + b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess) - b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild) - dbsToMigrate, err := b.getDBsToRebuild(ctx) + b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild) + dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent) if err != nil { - b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err)) success = false return res, err } - b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate))) + b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate))) res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res) if err != nil { success = false @@ -74,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) { var completedDBCount uint32 for _, db := range dbs { - b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) - movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter) + b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) + movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter) res.ObjectsMoved += movedObjects if err != nil { - b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), 
zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) return res, err } - b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects)) + b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects)) res.FilesRemoved++ completedDBCount++ b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs))) @@ -90,7 +94,26 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common. return res, nil } -func (b *Blobovniczas) getDBsToRebuild(ctx context.Context) ([]string, error) { +func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, fillPercent int) ([]string, error) { + withSchemaChange, err := b.selectDBsDoNotMatchSchema(ctx) + if err != nil { + return nil, err + } + withFillPercent, err := b.selectDBsDoNotMatchFillPercent(ctx, fillPercent) + if err != nil { + return nil, err + } + for k := range withFillPercent { + withSchemaChange[k] = struct{}{} + } + result := make([]string, 0, len(withSchemaChange)) + for db := range withSchemaChange { + result = append(result, db) + } + return result, nil +} + +func (b *Blobovniczas) selectDBsDoNotMatchSchema(ctx context.Context) (map[string]struct{}, error) { dbsToMigrate := make(map[string]struct{}) if err := b.iterateExistingDBPaths(ctx, func(s string) (bool, error) { dbsToMigrate[s] = struct{}{} @@ -104,16 +127,77 @@ func (b *Blobovniczas) getDBsToRebuild(ctx context.Context) ([]string, error) { }); err != nil { return nil, err } - result := make([]string, 0, len(dbsToMigrate)) - for db := range dbsToMigrate { - result = append(result, db) + return dbsToMigrate, nil +} + +func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, target int) (map[string]struct{}, error) { + if target <= 0 || target > 100 { + return nil, fmt.Errorf("invalid fill percent value %d: must be (0; 100]", target) + } + result := make(map[string]struct{}) + if err := b.iterateDeepest(ctx, oid.Address{}, func(lvlPath string) (bool, error) { + dir := filepath.Join(b.rootPath, lvlPath) + entries, err := os.ReadDir(dir) + if os.IsNotExist(err) { // non-initialized tree + return false, nil + } + if err != nil { + return false, err + } + hasDBs := false + // the db with maxIdx could be the active one, so it must not be rebuilt + var maxIdx uint64 + for _, e := range entries { + if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) { + continue + } + hasDBs = true + maxIdx = max(u64FromHexString(e.Name()), maxIdx) + } + if !hasDBs { + return false, nil + } + for _, e := range entries { + if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) { + continue + } + if u64FromHexString(e.Name()) == maxIdx { + continue + } + path := filepath.Join(lvlPath, e.Name()) + resettlementRequired, err := b.rebuildBySize(ctx, path, target) + if err != nil { + return false, err + } + if resettlementRequired { + result[path] = struct{}{} + } + } + return false, nil + }); err != nil { + return nil, err } return result, nil } -func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { - shDB := b.getBlobovnicza(path) - blz, err := shDB.Open() +func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) { + shDB := b.getBlobovnicza(ctx, path) + blz, err := shDB.Open(ctx) + if err != nil { + return false, err + } + defer shDB.Close(ctx) + fp :=
blz.FillPercent() + // the accepted fill percent range is defined as + // |----|+++++++++++++++++|+++++++++++++++++|--------------- + // 0% target 100% 100+(100 - target) + // where `+` marks an accepted fill percent and `-` one that triggers a rebuild (e.g. target 80 accepts [80, 120]) + return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil +} + +func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) { + shDB := b.getBlobovnicza(ctx, path) + blz, err := shDB.Open(ctx) if err != nil { return 0, err } @@ -122,18 +206,39 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M if shDBClosed { return } - shDB.Close() + shDB.Close(ctx) }() - - migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter) + dropTempFile, err := b.addRebuildTempFile(ctx, path) + if err != nil { + return 0, err + } + migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter) if err != nil { return migratedObjects, err } shDBClosed, err = b.dropDB(ctx, path, shDB) + if err == nil { + // drop only on success to continue rebuild on error + dropTempFile() + } return migratedObjects, err } -func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { +func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { + sysPath := filepath.Join(b.rootPath, path) + sysPath += rebuildSuffix + _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) + if err != nil { + return nil, err + } + return func() { + if err := os.Remove(sysPath); err != nil { + b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) + } + }, nil +} + +func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) { var result atomic.Uint64 batch := make(map[oid.Address][]byte) @@ -148,7 +253,12 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn }) for { - _, err := blz.Iterate(ctx, prm) + release, err := limiter.ReadRequest(ctx) + if err != nil { + return result.Load(), err + } + _, err = blz.Iterate(ctx, prm) + release() if err != nil && !errors.Is(err, errBatchFull) { return result.Load(), err } @@ -160,16 +270,19 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn eg, egCtx := errgroup.WithContext(ctx) for addr, data := range batch { - addr := addr - data := data - - if err := limiter.AcquireWorkSlot(egCtx); err != nil { + release, err := limiter.AcquireWorkSlot(egCtx) + if err != nil { _ = eg.Wait() return result.Load(), err } eg.Go(func() error { - defer limiter.ReleaseWorkSlot() - err := b.moveObject(egCtx, blz, blzPath, addr, data, meta) + defer release() + moveRelease, err := limiter.WriteRequest(ctx) + if err != nil { + return err + } + err = b.moveObject(egCtx, blz, blzPath, addr, data, meta) + moveRelease() if err == nil { result.Add(1) } @@ -215,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo return nil } -func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) { +func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) { select { case <-ctx.Done(): return false, ctx.Err() @@ -228,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx
context.Context, path string, shDb *sharedDB) b.dbFilesGuard.Lock() defer b.dbFilesGuard.Unlock() - if err := shDb.CloseAndRemoveFile(); err != nil { + if err := shDB.CloseAndRemoveFile(ctx); err != nil { return false, err } b.commondbManager.CleanResources(path) @@ -257,41 +370,67 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error { return b.dropDirectoryIfEmpty(filepath.Dir(path)) } -func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) { +func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) { var count uint64 - return count, b.iterateExistingDBPaths(ctx, func(s string) (bool, error) { - shDB := b.getBlobovnicza(s) - blz, err := shDB.Open() + var rebuildTempFilesToRemove []string + err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) { + rebuildTmpFilePath := s + s = strings.TrimSuffix(s, rebuildSuffix) + shDB := b.getBlobovnicza(ctx, s) + blz, err := shDB.Open(ctx) if err != nil { return true, err } - defer shDB.Close() + defer shDB.Close(ctx) + release, err := rateLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } incompletedMoves, err := blz.ListMoveInfo(ctx) + release() if err != nil { return true, err } for _, move := range incompletedMoves { - if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil { + release, err := rateLimiter.WriteRequest(ctx) + if err != nil { + return false, err + } + err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore) + release() + if err != nil { return true, err } count++ } + rebuildTempFilesToRemove = append(rebuildTempFilesToRemove, rebuildTmpFilePath) return false, nil }) + for _, tmp := range rebuildTempFilesToRemove { + release, err := rateLimiter.WriteRequest(ctx) + if err != nil { + return count, err + } + if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil { + b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) + } + release() + } + return count, err } func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string, move blobovnicza.MoveInfo, metaStore common.MetaStorage, ) error { - targetDB := b.getBlobovnicza(NewIDFromBytes(move.TargetStorageID).Path()) - target, err := targetDB.Open() + targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path()) + target, err := targetDB.Open(ctx) if err != nil { return err } - defer targetDB.Close() + defer targetDB.Close(ctx) existsInSource := true var gPrm blobovnicza.GetPrm @@ -301,14 +440,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob if client.IsErrObjectNotFound(err) { existsInSource = false } else { - b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) return err } } if !existsInSource { // object was deleted by Rebuild, need to delete move info if err = source.DropMoveInfo(ctx, move.Address); err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) return err } b.deleteProtectedObjects.Delete(move.Address) @@ -317,7 +456,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob existsInTarget, err := 
target.Exists(ctx, move.Address) if err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) return err } @@ -327,25 +466,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob putPrm.SetMarshaledObject(gRes.Object()) _, err = target.Put(ctx, putPrm) if err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err)) return err } } if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address)) return err } var deletePrm blobovnicza.DeletePrm deletePrm.SetAddress(move.Address) if _, err = source.Delete(ctx, deletePrm); err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err)) return err } if err = source.DropMoveInfo(ctx, move.Address); err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) return err } @@ -365,21 +504,21 @@ type moveIterator struct { } func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) { - target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath) + target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err)) } return false, nil } if target == nil { - i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) + i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) return false, nil } - defer target.Close() + defer target.Close(ctx) i.AllFull = false @@ -391,9 +530,9 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, TargetStorageID: targetStorageID.Bytes(), }); err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err)) } return true, nil } @@ -407,15 +546,15 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, _, err = target.Blobovnicza().Put(ctx, putPrm) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) + 
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err)) } return true, nil } if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil { - i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address)) return true, nil } @@ -423,18 +562,18 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, deletePrm.SetAddress(i.Address) if _, err = i.Source.Delete(ctx, deletePrm); err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err)) } return true, nil } if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err)) } return true, nil } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index a6afed60c..4146ef260 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -3,6 +3,7 @@ package blobovniczatree import ( "bytes" "context" + "os" "path/filepath" "sync" "testing" @@ -34,8 +35,8 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { dir := t.TempDir() blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -52,7 +53,9 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) + _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) + require.NoError(t, err) testRebuildFailoverValidate(t, dir, obj, true) } @@ -62,8 +65,8 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) { dir := t.TempDir() blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -80,16 +83,19 @@ func 
testRebuildFailoverObjectSavedToTarget(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) + + _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) + require.NoError(t, err) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) _, err = blz.Put(context.Background(), pPrm) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) testRebuildFailoverValidate(t, dir, obj, true) } @@ -99,8 +105,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { dir := t.TempDir() blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -111,11 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) + + _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) + require.NoError(t, err) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) var pPrm blobovnicza.PutPrm pPrm.SetAddress(object.AddressOf(obj)) @@ -123,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { _, err = blz.Put(context.Background(), pPrm) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) testRebuildFailoverValidate(t, dir, obj, false) } @@ -131,12 +140,13 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) { b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), WithRootPath(dir), - WithBlobovniczaSize(100*1024*1024), + WithBlobovniczaSize(10*1024), WithWaitBeforeDropDB(0), WithOpenedCacheSize(1000)) require.NoError(t, b.Open(mode.ComponentReadWrite)) @@ -152,19 +162,22 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object storageIDs: make(map[oid.Address][]byte), guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - WorkerLimiter: &rebuildLimiterStub{}, + MetaStorage: metaStub, + Limiter: limiter, + FillPercent: 1, }) require.NoError(t, err) require.Equal(t, uint64(1), rRes.ObjectsMoved) require.Equal(t, uint64(0), rRes.FilesRemoved) - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) blz := 
blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) moveInfo, err := blz.ListMoveInfo(context.Background()) require.NoError(t, err) @@ -175,11 +188,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object _, err = blz.Get(context.Background(), gPrm) require.True(t, client.IsErrObjectNotFound(err)) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open()) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Open(context.Background())) + require.NoError(t, blz.Init(context.Background())) moveInfo, err = blz.ListMoveInfo(context.Background()) require.NoError(t, err) @@ -193,5 +206,8 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)])) } - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) + + _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild")) + require.True(t, os.IsNotExist(err)) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index 7a1de4c13..a7a99fec3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -2,7 +2,9 @@ package blobovniczatree import ( "context" + "fmt" "sync" + "sync/atomic" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -15,7 +17,7 @@ import ( "golang.org/x/sync/errgroup" ) -func TestBlobovniczaTreeRebuild(t *testing.T) { +func TestBlobovniczaTreeSchemaRebuild(t *testing.T) { t.Parallel() t.Run("width increased", func(t *testing.T) { @@ -39,13 +41,276 @@ func TestBlobovniczaTreeRebuild(t *testing.T) { }) } +func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { + t.Parallel() + + t.Run("no rebuild by fill percent", func(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + b := NewBlobovniczaTree( + context.Background(), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithObjectSizeLimit(64*1024), + WithBlobovniczaShallowWidth(1), // single directory + WithBlobovniczaShallowDepth(1), + WithRootPath(dir), + WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza + WithWaitBeforeDropDB(0), + WithOpenedCacheSize(1000), + WithMoveBatchSize(3)) + require.NoError(t, b.Open(mode.ComponentReadWrite)) + require.NoError(t, b.Init()) + + storageIDs := make(map[oid.Address][]byte) + for range 100 { + obj := blobstortest.NewObject(64 * 1024) // 64KB object + data, err := obj.Marshal() + require.NoError(t, err) + var prm common.PutPrm + prm.Address = object.AddressOf(obj) + prm.RawData = data + res, err := b.Put(context.Background(), prm) + require.NoError(t, err) + storageIDs[prm.Address] = res.StorageID + } + metaStub := &storageIDUpdateStub{ + storageIDs: storageIDs, + guard: &sync.Mutex{}, + } + limiter := &rebuildLimiterStub{} + rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ + MetaStorage: metaStub, + Limiter: limiter, + FillPercent: 60, + }) + require.NoError(t, err) + dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || 
metaStub.updatedCount > 0 + require.False(t, dataMigrated) + + for addr, storageID := range storageIDs { + var gPrm common.GetPrm + gPrm.Address = addr + gPrm.StorageID = storageID + _, err := b.Get(context.Background(), gPrm) + require.NoError(t, err) + } + + require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) + }) + + t.Run("no rebuild single db", func(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + b := NewBlobovniczaTree( + context.Background(), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithObjectSizeLimit(64*1024), + WithBlobovniczaShallowWidth(1), // single directory + WithBlobovniczaShallowDepth(1), + WithRootPath(dir), + WithBlobovniczaSize(100*1024), // 100 KB soft limit for each blobovnicza + WithWaitBeforeDropDB(0), + WithOpenedCacheSize(1000), + WithMoveBatchSize(3)) + require.NoError(t, b.Open(mode.ComponentReadWrite)) + require.NoError(t, b.Init()) + + storageIDs := make(map[oid.Address][]byte) + obj := blobstortest.NewObject(64 * 1024) // 64KB object + data, err := obj.Marshal() + require.NoError(t, err) + var prm common.PutPrm + prm.Address = object.AddressOf(obj) + prm.RawData = data + res, err := b.Put(context.Background(), prm) + require.NoError(t, err) + storageIDs[prm.Address] = res.StorageID + metaStub := &storageIDUpdateStub{ + storageIDs: storageIDs, + guard: &sync.Mutex{}, + } + limiter := &rebuildLimiterStub{} + rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ + MetaStorage: metaStub, + Limiter: limiter, + FillPercent: 90, // 64KB / 100KB = 64% + }) + require.NoError(t, err) + dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 + require.False(t, dataMigrated) + + for addr, storageID := range storageIDs { + var gPrm common.GetPrm + gPrm.Address = addr + gPrm.StorageID = storageID + _, err := b.Get(context.Background(), gPrm) + require.NoError(t, err) + } + + require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) + }) + + t.Run("rebuild by fill percent", func(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + b := NewBlobovniczaTree( + context.Background(), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithObjectSizeLimit(64*1024), + WithBlobovniczaShallowWidth(1), // single directory + WithBlobovniczaShallowDepth(1), + WithRootPath(dir), + WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza + WithWaitBeforeDropDB(0), + WithOpenedCacheSize(1000), + WithMoveBatchSize(3)) + require.NoError(t, b.Open(mode.ComponentReadWrite)) + require.NoError(t, b.Init()) + + storageIDs := make(map[oid.Address][]byte) + toDelete := make(map[oid.Address][]byte) + for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created + obj := blobstortest.NewObject(64 * 1024) + data, err := obj.Marshal() + require.NoError(t, err) + var prm common.PutPrm + prm.Address = object.AddressOf(obj) + prm.RawData = data + res, err := b.Put(context.Background(), prm) + require.NoError(t, err) + storageIDs[prm.Address] = res.StorageID + if i%2 == 1 { + toDelete[prm.Address] = res.StorageID + } + } + for addr, storageID := range toDelete { + var prm common.DeletePrm + prm.Address = addr + prm.StorageID = storageID + _, err := b.Delete(context.Background(), prm) + require.NoError(t, err) + } + metaStub := &storageIDUpdateStub{ + storageIDs: storageIDs, + guard: &sync.Mutex{}, + } + limiter := 
&rebuildLimiterStub{} + rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ + MetaStorage: metaStub, + Limiter: limiter, + FillPercent: 80, + }) + require.NoError(t, err) + require.Equal(t, uint64(49), rRes.FilesRemoved) + require.Equal(t, uint64(49), rRes.ObjectsMoved) // 49 DBs with 1 objects + require.Equal(t, uint64(49), metaStub.updatedCount) + + for addr, storageID := range storageIDs { + if _, found := toDelete[addr]; found { + continue + } + var gPrm common.GetPrm + gPrm.Address = addr + gPrm.StorageID = storageID + _, err := b.Get(context.Background(), gPrm) + require.NoError(t, err) + } + + require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) + }) + + t.Run("rebuild by overflow", func(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + b := NewBlobovniczaTree( + context.Background(), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithObjectSizeLimit(64*1024), + WithBlobovniczaShallowWidth(1), // single directory + WithBlobovniczaShallowDepth(1), + WithRootPath(dir), + WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza + WithWaitBeforeDropDB(0), + WithOpenedCacheSize(1000), + WithMoveBatchSize(3)) + require.NoError(t, b.Open(mode.ComponentReadWrite)) + require.NoError(t, b.Init()) + + storageIDs := make(map[oid.Address][]byte) + for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created + obj := blobstortest.NewObject(64 * 1024) + data, err := obj.Marshal() + require.NoError(t, err) + var prm common.PutPrm + prm.Address = object.AddressOf(obj) + prm.RawData = data + res, err := b.Put(context.Background(), prm) + require.NoError(t, err) + storageIDs[prm.Address] = res.StorageID + } + metaStub := &storageIDUpdateStub{ + storageIDs: storageIDs, + guard: &sync.Mutex{}, + } + require.NoError(t, b.Close(context.Background())) + b = NewBlobovniczaTree( + context.Background(), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithObjectSizeLimit(64*1024), + WithBlobovniczaShallowWidth(1), + WithBlobovniczaShallowDepth(1), + WithRootPath(dir), + WithBlobovniczaSize(50*1024), // 50 KB limit for each blobovnicza + WithWaitBeforeDropDB(0), + WithOpenedCacheSize(1000), + WithMoveBatchSize(3)) + require.NoError(t, b.Open(mode.ComponentReadWrite)) + require.NoError(t, b.Init()) + + limiter := &rebuildLimiterStub{} + rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ + MetaStorage: metaStub, + Limiter: limiter, + FillPercent: 80, + }) + require.NoError(t, err) + require.Equal(t, uint64(49), rRes.FilesRemoved) + require.Equal(t, uint64(98), rRes.ObjectsMoved) // 49 DBs with 2 objects + require.Equal(t, uint64(98), metaStub.updatedCount) + + for addr, storageID := range storageIDs { + var gPrm common.GetPrm + gPrm.Address = addr + gPrm.StorageID = storageID + _, err := b.Get(context.Background(), gPrm) + require.NoError(t, err) + } + + require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) + }) +} + func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { t.Parallel() dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), // 64KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(2), // depth = 2 @@ -69,11 +334,12 @@ func TestBlobovniczaTreeRebuildLargeObject(t 
*testing.T) { storageIDs := make(map[oid.Address][]byte) storageIDs[prm.Address] = res.StorageID - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(32*1024), // 32KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(3), // depth = 3 @@ -89,9 +355,11 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.WorkerLimiter = &rebuildLimiterStub{} + rPrm.Limiter = limiter + rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 @@ -105,14 +373,16 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) } func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(sourceWidth), WithBlobovniczaShallowDepth(sourceDepth), @@ -127,7 +397,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta eg, egCtx := errgroup.WithContext(context.Background()) storageIDs := make(map[oid.Address][]byte) storageIDsGuard := &sync.Mutex{} - for i := 0; i < 100; i++ { + for range 100 { eg.Go(func() error { obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -149,11 +419,12 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta } require.NoError(t, eg.Wait()) - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(targetWidth), WithBlobovniczaShallowDepth(targetDepth), @@ -177,9 +448,11 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta storageIDs: storageIDs, guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.WorkerLimiter = &rebuildLimiterStub{} + rPrm.Limiter = limiter + rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 @@ -193,7 +466,8 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) } type storageIDUpdateStub struct { @@ -211,7 +485,36 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr return nil } -type rebuildLimiterStub struct{} +type rebuildLimiterStub struct { + slots atomic.Int64 + readRequests atomic.Int64 + writeRequests 
atomic.Int64 +} -func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) error { return nil } -func (s *rebuildLimiterStub) ReleaseWorkSlot() {} +func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) { + s.slots.Add(1) + return func() { s.slots.Add(-1) }, nil +} + +func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) { + s.readRequests.Add(1) + return func() { s.readRequests.Add(-1) }, nil +} + +func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) { + s.writeRequests.Add(1) + return func() { s.writeRequests.Add(-1) }, nil +} + +func (s *rebuildLimiterStub) ValidateReleased() error { + if v := s.slots.Load(); v != 0 { + return fmt.Errorf("invalid slots value %d", v) + } + if v := s.readRequests.Load(); v != 0 { + return fmt.Errorf("invalid read requests value %d", v) + } + if v := s.writeRequests.Load(); v != 0 { + return fmt.Errorf("invalid write requests value %d", v) + } + return nil +} diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index 6f579a8ca..ceaf2538a 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -1,6 +1,7 @@ package blobstor import ( + "context" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -40,14 +41,14 @@ type SubStorageInfo struct { type Option func(*cfg) type cfg struct { - compression compression.Config + compression compression.Compressor log *logger.Logger storage []SubStorage metrics Metrics } func initConfig(c *cfg) { - c.log = &logger.Logger{Logger: zap.L()} + c.log = logger.NewLoggerWrapper(zap.L()) c.metrics = &noopMetrics{} } @@ -90,56 +91,19 @@ func WithStorages(st []SubStorage) Option { // WithLogger returns option to specify BlobStor's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))} + c.log = l } } -// WithCompressObjects returns option to toggle -// compression of the stored objects. -// -// If true, Zstandard algorithm is used for data compression. -// -// If compressor (decompressor) creation failed, -// the uncompressed option will be used, and the error -// is recorded in the provided log. -func WithCompressObjects(comp bool) Option { +func WithCompression(comp compression.Config) Option { return func(c *cfg) { - c.compression.Enabled = comp - } -} - -// WithCompressibilityEstimate returns an option to use -// normilized compressibility estimate to decide compress -// data or not. -// -// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 -func WithCompressibilityEstimate(v bool) Option { - return func(c *cfg) { - c.compression.UseCompressEstimation = v - } -} - -// WithCompressibilityEstimateThreshold returns an option to set -// normilized compressibility estimate threshold. -// -// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 -func WithCompressibilityEstimateThreshold(threshold float64) Option { - return func(c *cfg) { - c.compression.CompressEstimationThreshold = threshold - } -} - -// WithUncompressableContentTypes returns option to disable decompression -// for specific content types as seen by object.AttributeContentType attribute. 
-func WithUncompressableContentTypes(values []string) Option { - return func(c *cfg) { - c.compression.UncompressableContentTypes = values + c.compression.Config = comp } } // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. -func (b *BlobStor) SetReportErrorFunc(f func(string, error)) { +func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) { for i := range b.storage { b.storage[i].Storage.SetReportErrorFunc(f) } @@ -151,6 +115,6 @@ func WithMetrics(m Metrics) Option { } } -func (b *BlobStor) Compressor() *compression.Config { - return &b.cfg.compression +func (b *BlobStor) Compressor() *compression.Compressor { + return &b.compression } diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index c7d80dc84..6ddeb6f00 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -51,16 +52,18 @@ func TestCompression(t *testing.T) { newBlobStor := func(t *testing.T, compress bool) *BlobStor { bs := New( - WithCompressObjects(compress), + WithCompression(compression.Config{ + Enabled: compress, + }), WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) return bs } bigObj := make([]*objectSDK.Object, objCount) smallObj := make([]*objectSDK.Object, objCount) - for i := 0; i < objCount; i++ { + for i := range objCount { bigObj[i] = testObject(smallSizeLimit * 2) smallObj[i] = testObject(smallSizeLimit / 2) } @@ -91,20 +94,20 @@ func TestCompression(t *testing.T) { blobStor := newBlobStor(t, false) testPut(t, blobStor, 0) testGet(t, blobStor, 0) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) blobStor = newBlobStor(t, true) testGet(t, blobStor, 0) // get uncompressed object with compress enabled testPut(t, blobStor, 1) testGet(t, blobStor, 1) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) blobStor = newBlobStor(t, false) testGet(t, blobStor, 0) // get old uncompressed object testGet(t, blobStor, 1) // get compressed object with compression disabled testPut(t, blobStor, 2) testGet(t, blobStor, 2) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) } func TestBlobstor_needsCompression(t *testing.T) { @@ -113,8 +116,10 @@ func TestBlobstor_needsCompression(t *testing.T) { dir := t.TempDir() bs := New( - WithCompressObjects(compress), - WithUncompressableContentTypes(ct), + WithCompression(compression.Config{ + Enabled: compress, + UncompressableContentTypes: ct, + }), WithStorages([]SubStorage{ { Storage: blobovniczatree.NewBlobovniczaTree( @@ -130,7 +135,7 @@ func 
TestBlobstor_needsCompression(t *testing.T) { }, })) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) return bs } @@ -192,7 +197,7 @@ func TestConcurrentPut(t *testing.T) { blobStor := New( WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)}) @@ -219,7 +224,7 @@ func TestConcurrentPut(t *testing.T) { bigObj := testObject(smallSizeLimit * 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount; i++ { + for range concurrentPutCount { wg.Add(1) go func() { testPut(t, blobStor, bigObj) @@ -235,7 +240,7 @@ func TestConcurrentPut(t *testing.T) { bigObj := testObject(smallSizeLimit * 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount+1; i++ { + for range concurrentPutCount + 1 { wg.Add(1) go func() { testPutFileExistsError(t, blobStor, bigObj) @@ -251,7 +256,7 @@ func TestConcurrentPut(t *testing.T) { smallObj := testObject(smallSizeLimit / 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount; i++ { + for range concurrentPutCount { wg.Add(1) go func() { testPut(t, blobStor, smallObj) @@ -272,7 +277,7 @@ func TestConcurrentDelete(t *testing.T) { blobStor := New( WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { var prm common.PutPrm @@ -302,7 +307,7 @@ func TestConcurrentDelete(t *testing.T) { testPut(t, blobStor, bigObj) var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for range 2 { wg.Add(1) go func() { testDelete(t, blobStor, bigObj) @@ -319,7 +324,7 @@ func TestConcurrentDelete(t *testing.T) { testPut(t, blobStor, smallObj) var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for range 2 { wg.Add(1) go func() { testDelete(t, blobStor, smallObj) diff --git a/pkg/local_object_storage/blobstor/common/delete.go b/pkg/local_object_storage/blobstor/common/delete.go index 1b04eab1a..c19e099cb 100644 --- a/pkg/local_object_storage/blobstor/common/delete.go +++ b/pkg/local_object_storage/blobstor/common/delete.go @@ -8,6 +8,7 @@ import ( type DeletePrm struct { Address oid.Address StorageID []byte + Size uint64 } // DeleteRes groups the resulting values of Delete operation. 
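// The tests above verify, via rebuildLimiterStub.ValidateReleased, that every
// successful acquire is paired with exactly one release. The next file
// (common/rebuild.go) defines the RebuildLimiter contract they exercise:
// acquires return a ReleaseFunc instead of relying on a separate Release
// method. A semaphore-backed sketch of the work-slot half of that contract;
// semLimiter is hypothetical, not the repository's limiter implementation.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

type ReleaseFunc func()

type semLimiter struct {
	sem *semaphore.Weighted
}

// AcquireWorkSlot blocks until a slot is free (or ctx is cancelled) and
// returns a release closure paired with this exact acquisition, so a caller
// cannot release a slot it never took.
func (l *semLimiter) AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) {
	if err := l.sem.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { l.sem.Release(1) }, nil
}

func main() {
	l := &semLimiter{sem: semaphore.NewWeighted(2)}
	release, err := l.AcquireWorkSlot(context.Background())
	if err != nil {
		fmt.Println("acquire failed:", err)
		return
	}
	defer release()
	fmt.Println("work slot held")
}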
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go index 9f629ef8c..788fe66f2 100644 --- a/pkg/local_object_storage/blobstor/common/rebuild.go +++ b/pkg/local_object_storage/blobstor/common/rebuild.go @@ -12,15 +12,27 @@ type RebuildRes struct { } type RebuildPrm struct { - MetaStorage MetaStorage - WorkerLimiter ConcurrentWorkersLimiter + MetaStorage MetaStorage + Limiter RebuildLimiter + FillPercent int } type MetaStorage interface { UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error } -type ConcurrentWorkersLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() +type ReleaseFunc func() + +type ConcurrencyLimiter interface { + AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) +} + +type RateLimiter interface { + ReadRequest(context.Context) (ReleaseFunc, error) + WriteRequest(context.Context) (ReleaseFunc, error) +} + +type RebuildLimiter interface { + ConcurrencyLimiter + RateLimiter } diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go index 4f3a20993..e35c35e60 100644 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ b/pkg/local_object_storage/blobstor/common/storage.go @@ -12,18 +12,18 @@ import ( type Storage interface { Open(mode mode.ComponentMode) error Init() error - Close() error + Close(context.Context) error Type() string Path() string ObjectsCount(ctx context.Context) (uint64, error) - SetCompressor(cc *compression.Config) - Compressor() *compression.Config + SetCompressor(cc *compression.Compressor) + Compressor() *compression.Compressor // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. 
- SetReportErrorFunc(f func(string, error)) + SetReportErrorFunc(f func(context.Context, string, error)) SetParentID(parentID string) Get(context.Context, GetPrm) (GetRes, error) diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go index 986912985..445a0494b 100644 --- a/pkg/local_object_storage/blobstor/compression/bench_test.go +++ b/pkg/local_object_storage/blobstor/compression/bench_test.go @@ -11,7 +11,7 @@ import ( ) func BenchmarkCompression(b *testing.B) { - c := Config{Enabled: true} + c := Compressor{Config: Config{Enabled: true}} require.NoError(b, c.Init()) for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} { @@ -33,10 +33,10 @@ func BenchmarkCompression(b *testing.B) { } } -func benchWith(b *testing.B, c Config, data []byte) { +func benchWith(b *testing.B, c Compressor, data []byte) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { _ = c.Compress(data) } } @@ -56,8 +56,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("estimate", func(b *testing.B) { b.ResetTimer() - c := &Config{ - Enabled: true, + c := &Compressor{ + Config: Config{ + Enabled: true, + }, } require.NoError(b, c.Init()) @@ -76,8 +78,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("compress", func(b *testing.B) { b.ResetTimer() - c := &Config{ - Enabled: true, + c := &Compressor{ + Config: Config{ + Enabled: true, + }, } require.NoError(b, c.Init()) diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go index 85ab47692..c76cec9a1 100644 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ b/pkg/local_object_storage/blobstor/compression/compress.go @@ -4,21 +4,36 @@ import ( "bytes" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/klauspost/compress" "github.com/klauspost/compress/zstd" ) +type Level string + +const ( + LevelDefault Level = "" + LevelOptimal Level = "optimal" + LevelFastest Level = "fastest" + LevelSmallestSize Level = "smallest_size" +) + +type Compressor struct { + Config + + encoder *zstd.Encoder + decoder *zstd.Decoder +} + // Config represents common compression-related configuration. type Config struct { Enabled bool UncompressableContentTypes []string + Level Level - UseCompressEstimation bool - CompressEstimationThreshold float64 - - encoder *zstd.Encoder - decoder *zstd.Decoder + EstimateCompressibility bool + EstimateCompressibilityThreshold float64 } // zstdFrameMagic contains first 4 bytes of any compressed object @@ -26,11 +41,11 @@ type Config struct { var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} // Init initializes compression routines. -func (c *Config) Init() error { +func (c *Compressor) Init() error { var err error if c.Enabled { - c.encoder, err = zstd.NewWriter(nil) + c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel())) if err != nil { return err } @@ -73,7 +88,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool { // Decompress decompresses data if it starts with the magic // and returns data untouched otherwise. 
-func (c *Config) Decompress(data []byte) ([]byte, error) { if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) { return data, nil } @@ -82,13 +97,13 @@ func (c *Config) Decompress(data []byte) ([]byte, error) { // Compress compresses data if compression is enabled // and returns data untouched otherwise. -func (c *Config) Compress(data []byte) []byte { +func (c *Compressor) Compress(data []byte) []byte { if c == nil || !c.Enabled { return data } - if c.UseCompressEstimation { + if c.EstimateCompressibility { estimated := compress.Estimate(data) - if estimated >= c.CompressEstimationThreshold { + if estimated >= c.EstimateCompressibilityThreshold { return c.compress(data) } return data @@ -96,7 +111,7 @@ func (c *Config) Compress(data []byte) []byte { return c.compress(data) } -func (c *Config) compress(data []byte) []byte { +func (c *Compressor) compress(data []byte) []byte { maxSize := c.encoder.MaxEncodedSize(len(data)) compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) if len(data) < len(compressed) { @@ -106,7 +121,7 @@ func (c *Config) compress(data []byte) []byte { } // Close closes encoder and decoder, returns any error that occurred. -func (c *Config) Close() error { +func (c *Compressor) Close() error { var err error if c.encoder != nil { err = c.encoder.Close() @@ -116,3 +131,24 @@ func (c *Config) Close() error { } return err } + +func (c *Config) HasValidCompressionLevel() bool { + return c.Level == LevelDefault || + c.Level == LevelOptimal || + c.Level == LevelFastest || + c.Level == LevelSmallestSize +} + +func (c *Compressor) compressionLevel() zstd.EncoderLevel { + switch c.Level { + case LevelDefault, LevelOptimal: + return zstd.SpeedDefault + case LevelFastest: + return zstd.SpeedFastest + case LevelSmallestSize: + return zstd.SpeedBestCompression + default: + assert.Fail("unknown compression level", string(c.Level)) + return zstd.SpeedDefault + } +} diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 9b414a9be..0418eedd0 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -6,13 +6,14 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "go.uber.org/zap" ) // Open opens BlobStor. func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error { - b.log.Debug(logs.BlobstorOpening) + b.log.Debug(ctx, logs.BlobstorOpening) b.modeMtx.Lock() defer b.modeMtx.Unlock() @@ -50,9 +51,13 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag // If BlobStor is already initialized, no action is taken. // // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure. -func (b *BlobStor) Init() error { - b.log.Debug(logs.BlobstorInitializing) +func (b *BlobStor) Init(ctx context.Context) error { + b.log.Debug(ctx, logs.BlobstorInitializing) + if !b.compression.HasValidCompressionLevel() { + b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level))) + b.compression.Level = compression.LevelDefault + } if err := b.compression.Init(); err != nil { return err } @@ -67,14 +72,14 @@ func (b *BlobStor) Init() error { } // Close releases all internal resources of BlobStor.
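Pulling the compression changes together before moving on: Config stays a plain settings struct, Compressor owns the zstd encoder/decoder, and compressionLevel asserts on unknown levels, so callers are expected to check HasValidCompressionLevel first, exactly as BlobStor.Init now does. A usage sketch under those assumptions (values are illustrative):

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
)

func main() {
	c := &compression.Compressor{Config: compression.Config{
		Enabled: true,
		Level:   compression.LevelFastest, // "", "optimal", "fastest" or "smallest_size"
	}}
	// Mirror BlobStor.Init: never call Init with an unvalidated level,
	// since compressionLevel asserts on unknown values.
	if !c.HasValidCompressionLevel() {
		c.Level = compression.LevelDefault
	}
	if err := c.Init(); err != nil { // builds the zstd encoder at the chosen speed
		panic(err)
	}
	defer func() { _ = c.Close() }()

	compressed := c.Compress([]byte("payload"))
	fmt.Println(len(compressed))
}
```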
-func (b *BlobStor) Close() error { - b.log.Debug(logs.BlobstorClosing) +func (b *BlobStor) Close(ctx context.Context) error { + b.log.Debug(ctx, logs.BlobstorClosing) var firstErr error for i := range b.storage { - err := b.storage[i].Storage.Close() + err := b.storage[i].Storage.Close(ctx) if err != nil { - b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) + b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err)) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go index c91508e6d..86d8f15e3 100644 --- a/pkg/local_object_storage/blobstor/delete.go +++ b/pkg/local_object_storage/blobstor/delete.go @@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del if err == nil || !client.IsErrObjectNotFound(err) { if err == nil { success = true - logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID) + logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID) } return res, err } @@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del res, err := st.Delete(ctx, prm) if err == nil { success = true - logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID) + logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID) } return res, err diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index 43feec7c9..c155e15b8 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -73,10 +72,9 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi } for _, err := range errors[:len(errors)-1] { - b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking, + b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking, zap.Stringer("address", prm.Address), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } return common.ExistsRes{}, errors[len(errors)-1] diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go index 783c198b2..7eb7d49bf 100644 --- a/pkg/local_object_storage/blobstor/exists_test.go +++ b/pkg/local_object_storage/blobstor/exists_test.go @@ -22,7 +22,7 @@ func TestExists(t *testing.T) { b := New(WithStorages(storages)) require.NoError(t, b.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, b.Init()) + require.NoError(t, b.Init(context.Background())) objects := []*objectSDK.Object{ testObject(smallSizeLimit / 2), diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go index c21d79f09..2544729f7 100644 --- a/pkg/local_object_storage/blobstor/fstree/control.go +++ b/pkg/local_object_storage/blobstor/fstree/control.go @@ -1,6 +1,8 @@ package fstree import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" ) @@ -28,7 +30,7 @@ func (t 
*FSTree) Init() error { } // Close implements common.Storage. -func (t *FSTree) Close() error { +func (t *FSTree) Close(_ context.Context) error { t.metrics.Close() return nil } diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go index 718104e2e..3caee7ee1 100644 --- a/pkg/local_object_storage/blobstor/fstree/counter.go +++ b/pkg/local_object_storage/blobstor/fstree/counter.go @@ -1,22 +1,23 @@ package fstree import ( - "math" - "sync/atomic" + "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) // FileCounter used to count files in FSTree. The implementation must be thread-safe. type FileCounter interface { - Set(v uint64) - Inc() - Dec() + Set(count, size uint64) + Inc(size uint64) + Dec(size uint64) } type noopCounter struct{} -func (c *noopCounter) Set(uint64) {} -func (c *noopCounter) Inc() {} -func (c *noopCounter) Dec() {} +func (c *noopCounter) Set(uint64, uint64) {} +func (c *noopCounter) Inc(uint64) {} +func (c *noopCounter) Dec(uint64) {} func counterEnabled(c FileCounter) bool { _, noop := c.(*noopCounter) @@ -24,14 +25,45 @@ func counterEnabled(c FileCounter) bool { } type SimpleCounter struct { - v atomic.Uint64 + mtx sync.RWMutex + count uint64 + size uint64 } func NewSimpleCounter() *SimpleCounter { return &SimpleCounter{} } -func (c *SimpleCounter) Set(v uint64) { c.v.Store(v) } -func (c *SimpleCounter) Inc() { c.v.Add(1) } -func (c *SimpleCounter) Dec() { c.v.Add(math.MaxUint64) } -func (c *SimpleCounter) Value() uint64 { return c.v.Load() } +func (c *SimpleCounter) Set(count, size uint64) { + c.mtx.Lock() + defer c.mtx.Unlock() + + c.count = count + c.size = size +} + +func (c *SimpleCounter) Inc(size uint64) { + c.mtx.Lock() + defer c.mtx.Unlock() + + c.count++ + c.size += size +} + +func (c *SimpleCounter) Dec(size uint64) { + c.mtx.Lock() + defer c.mtx.Unlock() + + assert.True(c.count > 0, "fstree.SimpleCounter: invalid count") + c.count-- + + assert.True(c.size >= size, "fstree.SimpleCounter: invalid size") + c.size -= size +} + +func (c *SimpleCounter) CountSize() (uint64, uint64) { + c.mtx.RLock() + defer c.mtx.RUnlock() + + return c.count, c.size +} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 02580dbfa..112741ab4 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -45,7 +45,7 @@ type FSTree struct { log *logger.Logger - *compression.Config + compressor *compression.Compressor Depth uint64 DirNameLen int @@ -82,12 +82,12 @@ func New(opts ...Option) *FSTree { Permissions: 0o700, RootPath: "./", }, - Config: nil, + compressor: nil, Depth: 4, DirNameLen: DirNameLen, metrics: &noopMetrics{}, fileCounter: &noopCounter{}, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), } for i := range opts { opts[i](f) @@ -152,8 +152,8 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr des, err := os.ReadDir(dirPath) if err != nil { if prm.IgnoreErrors { - t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, - zap.String("err", err.Error()), + t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, + zap.Error(err), zap.String("directory_path", dirPath)) return nil } @@ -196,13 +196,13 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr } if err == nil { - data, err = t.Decompress(data) + data, err = t.compressor.Decompress(data) } if err != nil 
{ if prm.IgnoreErrors { - t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", addr), - zap.String("err", err.Error()), + zap.Error(err), zap.String("path", path)) continue } @@ -222,6 +222,81 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr return nil } +type ObjectInfo struct { + Address oid.Address + DataSize uint64 +} +type IterateInfoHandler func(ObjectInfo) error + +func (t *FSTree) IterateInfo(ctx context.Context, handler IterateInfoHandler) error { + var ( + err error + startedAt = time.Now() + ) + defer func() { + t.metrics.IterateInfo(time.Since(startedAt), err == nil) + }() + _, span := tracing.StartSpanFromContext(ctx, "FSTree.IterateInfo") + defer span.End() + + return t.iterateInfo(ctx, 0, []string{t.RootPath}, handler) +} + +func (t *FSTree) iterateInfo(ctx context.Context, depth uint64, curPath []string, handler IterateInfoHandler) error { + curName := strings.Join(curPath[1:], "") + dirPath := filepath.Join(curPath...) + entries, err := os.ReadDir(dirPath) + if err != nil { + return fmt.Errorf("read fstree dir '%s': %w", dirPath, err) + } + + isLast := depth >= t.Depth + l := len(curPath) + curPath = append(curPath, "") + + for i := range entries { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + curPath[l] = entries[i].Name() + + if !isLast && entries[i].IsDir() { + err := t.iterateInfo(ctx, depth+1, curPath, handler) + if err != nil { + return err + } + } + + if depth != t.Depth { + continue + } + + addr, err := addressFromString(curName + entries[i].Name()) + if err != nil { + continue + } + info, err := entries[i].Info() + if err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + + err = handler(ObjectInfo{ + Address: addr, + DataSize: uint64(info.Size()), + }) + if err != nil { + return err + } + } + + return nil +} + func (t *FSTree) treePath(addr oid.Address) string { sAddr := stringifyAddress(addr) @@ -263,7 +338,7 @@ func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.Delet } p := t.treePath(prm.Address) - err = t.writer.removeFile(p) + err = t.writer.removeFile(p, prm.Size) return common.DeleteRes{}, err } @@ -330,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err return common.PutRes{}, err } if !prm.DontCompress { - prm.RawData = t.Compress(prm.RawData) + prm.RawData = t.compressor.Compress(prm.RawData) } size = len(prm.RawData) @@ -373,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err } } - data, err = t.Decompress(data) + data, err = t.compressor.Decompress(data) if err != nil { return common.GetRes{}, err } @@ -435,32 +510,38 @@ func (t *FSTree) initFileCounter() error { return nil } - counter, err := t.countFiles() + count, size, err := t.countFiles() if err != nil { return err } - t.fileCounter.Set(counter) + t.fileCounter.Set(count, size) return nil } -func (t *FSTree) countFiles() (uint64, error) { - var counter uint64 +func (t *FSTree) countFiles() (uint64, uint64, error) { + var count, size uint64 // it is simpler to just consider every file // that is not directory as an object err := filepath.WalkDir(t.RootPath, func(_ string, d fs.DirEntry, _ error) error { - if !d.IsDir() { - counter++ + if d.IsDir() { + return nil } + count++ + info, err := d.Info() + if err != nil { + return err + } + size += uint64(info.Size()) return nil }, ) if err != nil { - return 0, fmt.Errorf("could not 
walk through %s directory: %w", t.RootPath, err) + return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err) } - return counter, nil + return count, size, nil } func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) { @@ -496,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) { }, ) if err != nil { - return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err) + return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err) } success = true return result, nil @@ -516,16 +597,16 @@ func (t *FSTree) Path() string { } // SetCompressor implements common.Storage. -func (t *FSTree) SetCompressor(cc *compression.Config) { - t.Config = cc +func (t *FSTree) SetCompressor(cc *compression.Compressor) { + t.compressor = cc } -func (t *FSTree) Compressor() *compression.Config { - return t.Config +func (t *FSTree) Compressor() *compression.Compressor { + return t.compressor } // SetReportErrorFunc implements common.Storage. -func (t *FSTree) SetReportErrorFunc(_ func(string, error)) { +func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) { // Do nothing, FSTree can encounter only one error which is returned. } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go index d633cbac3..50dae46a7 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go @@ -28,7 +28,7 @@ func Benchmark_addressFromString(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _, err := addressFromString(s) if err != nil { b.Fatalf("benchmark error: %v", err) @@ -47,11 +47,12 @@ func TestObjectCounter(t *testing.T) { require.NoError(t, fst.Open(mode.ComponentReadWrite)) require.NoError(t, fst.Init()) - counterValue := counter.Value() - require.Equal(t, uint64(0), counterValue) + count, size := counter.CountSize() + require.Equal(t, uint64(0), count) + require.Equal(t, uint64(0), size) defer func() { - require.NoError(t, fst.Close()) + require.NoError(t, fst.Close(context.Background())) }() addr := oidtest.Address() @@ -64,39 +65,73 @@ func TestObjectCounter(t *testing.T) { putPrm.Address = addr putPrm.RawData, _ = obj.Marshal() - var getPrm common.GetPrm - getPrm.Address = putPrm.Address - var delPrm common.DeletePrm delPrm.Address = addr - eg, egCtx := errgroup.WithContext(context.Background()) + t.Run("without size hint", func(t *testing.T) { + eg, egCtx := errgroup.WithContext(context.Background()) - eg.Go(func() error { - for j := 0; j < 1_000; j++ { - _, err := fst.Put(egCtx, putPrm) - if err != nil { - return err + eg.Go(func() error { + for range 1_000 { + _, err := fst.Put(egCtx, putPrm) + if err != nil { + return err + } } - } - return nil + return nil + }) + + eg.Go(func() error { + var le logicerr.Logical + for range 1_000 { + _, err := fst.Delete(egCtx, delPrm) + if err != nil && !errors.As(err, &le) { + return err + } + } + return nil + }) + + require.NoError(t, eg.Wait()) + + count, size = counter.CountSize() + realCount, realSize, err := fst.countFiles() + require.NoError(t, err) + require.Equal(t, realCount, count, "real %d, actual %d", realCount, count) + require.Equal(t, realSize, size, "real %d, actual %d", realSize, size) }) - eg.Go(func() error { - var le logicerr.Logical - for j := 0; j < 1_000; j++ { - _, err := fst.Delete(egCtx, delPrm) - if err != nil && !errors.As(err, &le) { - return err + t.Run("with size hint", 
func(t *testing.T) { + delPrm.Size = uint64(len(putPrm.RawData)) + eg, egCtx := errgroup.WithContext(context.Background()) + + eg.Go(func() error { + for range 1_000 { + _, err := fst.Put(egCtx, putPrm) + if err != nil { + return err + } } - } - return nil + return nil + }) + + eg.Go(func() error { + var le logicerr.Logical + for range 1_000 { + _, err := fst.Delete(egCtx, delPrm) + if err != nil && !errors.As(err, &le) { + return err + } + } + return nil + }) + + require.NoError(t, eg.Wait()) + + count, size = counter.CountSize() + realCount, realSize, err := fst.countFiles() + require.NoError(t, err) + require.Equal(t, realCount, count, "real %d, actual %d", realCount, count) + require.Equal(t, realSize, size, "real %d, actual %d", realSize, size) }) - - require.NoError(t, eg.Wait()) - - counterValue = counter.Value() - realCount, err := fst.countFiles() - require.NoError(t, err) - require.Equal(t, realCount, counterValue) } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go index 8b2622885..6d633dad6 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go @@ -16,7 +16,7 @@ import ( type writer interface { writeData(string, []byte) error - removeFile(string) error + removeFile(string, uint64) error } type genericWriter struct { @@ -67,25 +67,22 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error { err := w.writeFile(tmpPath, data) if err != nil { var pe *fs.PathError - if errors.As(err, &pe) { - switch pe.Err { - case syscall.ENOSPC: - err = common.ErrNoSpace - _ = os.RemoveAll(tmpPath) - } + if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) { + err = common.ErrNoSpace + _ = os.RemoveAll(tmpPath) } return err } if w.fileCounterEnabled { - w.fileCounter.Inc() + w.fileCounter.Inc(uint64(len(data))) var targetFileExists bool if _, e := os.Stat(p); e == nil { targetFileExists = true } err = os.Rename(tmpPath, p) if err == nil && targetFileExists { - w.fileCounter.Dec() + w.fileCounter.Dec(uint64(len(data))) } } else { err = os.Rename(tmpPath, p) @@ -107,15 +104,10 @@ func (w *genericWriter) writeFile(p string, data []byte) error { return err } -func (w *genericWriter) removeFile(p string) error { +func (w *genericWriter) removeFile(p string, size uint64) error { var err error if w.fileCounterEnabled { - w.fileGuard.Lock(p) - err = os.Remove(p) - w.fileGuard.Unlock(p) - if err == nil { - w.fileCounter.Dec() - } + err = w.removeWithCounter(p, size) } else { err = os.Remove(p) } @@ -125,3 +117,22 @@ func (w *genericWriter) removeFile(p string) error { } return err } + +func (w *genericWriter) removeWithCounter(p string, size uint64) error { + w.fileGuard.Lock(p) + defer w.fileGuard.Unlock(p) + + if size == 0 { + stat, err := os.Stat(p) + if err != nil { + return err + } + size = uint64(stat.Size()) + } + + if err := os.Remove(p); err != nil { + return err + } + w.fileCounter.Dec(size) + return nil +} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go index efc5a3d3d..49cbda344 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "golang.org/x/sys/unix" ) @@ -18,7 +19,9 @@ type linuxWriter struct { perm uint32 flags int - counter FileCounter + fileGuard keyLock + fileCounter FileCounter + fileCounterEnabled bool } func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync bool) writer { @@ -33,11 +36,18 @@ func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync b return nil } _ = unix.Close(fd) // Don't care about error. + var fileGuard keyLock = &noopKeyLock{} + fileCounterEnabled := counterEnabled(c) + if fileCounterEnabled { + fileGuard = utilSync.NewKeyLocker[string]() + } w := &linuxWriter{ - root: root, - perm: uint32(perm), - flags: flags, - counter: c, + root: root, + perm: uint32(perm), + flags: flags, + fileGuard: fileGuard, + fileCounter: c, + fileCounterEnabled: fileCounterEnabled, } return w } @@ -51,24 +61,45 @@ func (w *linuxWriter) writeData(p string, data []byte) error { } func (w *linuxWriter) writeFile(p string, data []byte) error { + if w.fileCounterEnabled { + w.fileGuard.Lock(p) + defer w.fileGuard.Unlock(p) + } fd, err := unix.Open(w.root, w.flags, w.perm) if err != nil { return err } + written := 0 tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10) n, err := unix.Write(fd, data) - if err == nil { - if n == len(data) { + for err == nil { + written += n + + if written == len(data) { err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW) if err == nil { - w.counter.Inc() + w.fileCounter.Inc(uint64(len(data))) } if errors.Is(err, unix.EEXIST) { err = nil } - } else { - err = errors.New("incomplete write") + break } + + // From man 2 write: + // https://www.man7.org/linux/man-pages/man2/write.2.html + // + // Note that a successful write() may transfer fewer than count + // bytes. Such partial writes can occur for various reasons; for + // example, because there was insufficient space on the disk device + // to write all of the requested bytes, or because a blocked write() + // to a socket, pipe, or similar was interrupted by a signal handler + // after it had transferred some, but before it had transferred all + // of the requested bytes. In the event of a partial write, the + // caller can make another write() call to transfer the remaining + // bytes. The subsequent call will either transfer further bytes or + // may result in an error (e.g., if the disk is now full). 
+ n, err = unix.Write(fd, data[written:]) } errClose := unix.Close(fd) if err != nil { @@ -77,13 +108,30 @@ func (w *linuxWriter) writeFile(p string, data []byte) error { return errClose } -func (w *linuxWriter) removeFile(p string) error { +func (w *linuxWriter) removeFile(p string, size uint64) error { + if w.fileCounterEnabled { + w.fileGuard.Lock(p) + defer w.fileGuard.Unlock(p) + + if size == 0 { + var stat unix.Stat_t + err := unix.Stat(p, &stat) + if err != nil { + if err == unix.ENOENT { + return logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + return err + } + size = uint64(stat.Size) + } + } + err := unix.Unlink(p) if err != nil && err == unix.ENOENT { return logicerr.Wrap(new(apistatus.ObjectNotFound)) } if err == nil { - w.counter.Dec() + w.fileCounter.Dec(size) } return err } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go new file mode 100644 index 000000000..7fae2e695 --- /dev/null +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go @@ -0,0 +1,42 @@ +//go:build linux && integration + +package fstree + +import ( + "context" + "errors" + "os" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" +) + +func TestENOSPC(t *testing.T) { + dir, err := os.MkdirTemp(t.TempDir(), "ramdisk") + require.NoError(t, err) + + f, err := os.CreateTemp(t.TempDir(), "ramdisk_*") + require.NoError(t, err) + + err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M") + if errors.Is(err, unix.EPERM) { + t.Skipf("skip size tests: no permission to mount: %v", err) + return + } + require.NoError(t, err) + defer func() { + require.NoError(t, unix.Unmount(dir, 0)) + }() + + fst := New(WithPath(dir), WithDepth(1)) + require.NoError(t, fst.Open(mode.ComponentReadWrite)) + require.NoError(t, fst.Init()) + + _, err = fst.Put(context.Background(), common.PutPrm{ + RawData: make([]byte, 10<<20), + }) + require.ErrorIs(t, err, common.ErrNoSpace) +} diff --git a/pkg/local_object_storage/blobstor/fstree/metrics.go b/pkg/local_object_storage/blobstor/fstree/metrics.go index 10de935eb..4241beec9 100644 --- a/pkg/local_object_storage/blobstor/fstree/metrics.go +++ b/pkg/local_object_storage/blobstor/fstree/metrics.go @@ -13,6 +13,7 @@ type Metrics interface { Close() Iterate(d time.Duration, success bool) + IterateInfo(d time.Duration, success bool) Delete(d time.Duration, success bool) Exists(d time.Duration, success bool) Put(d time.Duration, size int, success bool) @@ -27,6 +28,7 @@ func (m *noopMetrics) SetParentID(string) {} func (m *noopMetrics) SetMode(mode.ComponentMode) {} func (m *noopMetrics) Close() {} func (m *noopMetrics) Iterate(time.Duration, bool) {} +func (m *noopMetrics) IterateInfo(time.Duration, bool) {} func (m *noopMetrics) Delete(time.Duration, bool) {} func (m *noopMetrics) Exists(time.Duration, bool) {} func (m *noopMetrics) Put(time.Duration, int, bool) {} diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go index 4d1f8fc22..6f2ac87e1 100644 --- a/pkg/local_object_storage/blobstor/fstree/option.go +++ b/pkg/local_object_storage/blobstor/fstree/option.go @@ -4,7 +4,6 @@ import ( "io/fs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) type Option func(*FSTree) @@ -53,6 +52,6 @@ 
func WithFileCounter(c FileCounter) Option { func WithLogger(l *logger.Logger) Option { return func(f *FSTree) { - f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))} + f.log = l } } diff --git a/pkg/local_object_storage/blobstor/info.go b/pkg/local_object_storage/blobstor/info.go index 8a5bb870a..c1c47f3bb 100644 --- a/pkg/local_object_storage/blobstor/info.go +++ b/pkg/local_object_storage/blobstor/info.go @@ -43,7 +43,6 @@ func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) { eg, egCtx := errgroup.WithContext(ctx) for i := range b.storage { - i := i eg.Go(func() error { v, e := b.storage[i].Storage.ObjectsCount(egCtx) if e != nil { diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go index c08e39bf1..5d14a9a3a 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go @@ -27,21 +27,21 @@ type objectDesc struct { storageID []byte } -func TestAll(t *testing.T, cons Constructor, min, max uint64) { +func TestAll(t *testing.T, cons Constructor, minSize, maxSize uint64) { t.Run("get", func(t *testing.T) { - TestGet(t, cons, min, max) + TestGet(t, cons, minSize, maxSize) }) t.Run("get range", func(t *testing.T) { - TestGetRange(t, cons, min, max) + TestGetRange(t, cons, minSize, maxSize) }) t.Run("delete", func(t *testing.T) { - TestDelete(t, cons, min, max) + TestDelete(t, cons, minSize, maxSize) }) t.Run("exists", func(t *testing.T) { - TestExists(t, cons, min, max) + TestExists(t, cons, minSize, maxSize) }) t.Run("iterate", func(t *testing.T) { - TestIterate(t, cons, min, max) + TestIterate(t, cons, minSize, maxSize) }) } @@ -51,12 +51,12 @@ func TestInfo(t *testing.T, cons Constructor, expectedType string, expectedPath require.Equal(t, expectedPath, s.Path()) } -func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objectDesc { +func prepare(t *testing.T, count int, s common.Storage, minSize, maxSize uint64) []objectDesc { objects := make([]objectDesc, count) r := mrand.New(mrand.NewSource(0)) for i := range objects { - objects[i].obj = NewObject(min + uint64(r.Intn(int(max-min+1)))) // not too large + objects[i].obj = NewObject(minSize + uint64(r.Intn(int(maxSize-minSize+1)))) // not too large objects[i].addr = objectCore.AddressOf(objects[i].obj) raw, err := objects[i].obj.Marshal() diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go index a3bbc021d..b8e88f84a 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go @@ -13,13 +13,13 @@ import ( // TestControl checks correctness of a read-only mode. // cons must return a storage which is NOT opened. 
-func TestControl(t *testing.T, cons Constructor, min, max uint64) { +func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - objects := prepare(t, 10, s, min, max) - require.NoError(t, s.Close()) + objects := prepare(t, 10, s, minSize, maxSize) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(mode.ComponentReadOnly)) for i := range objects { @@ -34,7 +34,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) { t.Run("put fails", func(t *testing.T) { var prm common.PutPrm - prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1)))) + prm.Object = NewObject(minSize + uint64(rand.Intn(int(maxSize-minSize+1)))) prm.Address = objectCore.AddressOf(prm.Object) _, err := s.Put(context.Background(), prm) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go index 750619a30..3a163f6b1 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go @@ -11,13 +11,13 @@ import ( "github.com/stretchr/testify/require" ) -func TestDelete(t *testing.T, cons Constructor, min, max uint64) { +func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() - objects := prepare(t, 4, s, min, max) + objects := prepare(t, 4, s, minSize, maxSize) t.Run("delete non-existent", func(t *testing.T) { var prm common.DeletePrm diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go index 33b50b12f..f34fe5f97 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go @@ -10,13 +10,13 @@ import ( "github.com/stretchr/testify/require" ) -func TestExists(t *testing.T, cons Constructor, min, max uint64) { +func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() - objects := prepare(t, 1, s, min, max) + objects := prepare(t, 1, s, minSize, maxSize) t.Run("missing object", func(t *testing.T) { prm := common.ExistsPrm{Address: oidtest.Address()} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go index 12f73c3e9..af0f4b45d 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go @@ -11,13 +11,13 @@ import ( "github.com/stretchr/testify/require" ) -func TestGet(t *testing.T, cons Constructor, min, max uint64) { +func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() - objects := prepare(t, 2, s, min, max) + objects := prepare(t, 2, s, minSize, maxSize) 
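A note on the mechanical min/max → minSize/maxSize renames running through these test helpers: since Go 1.21, min and max are predeclared functions, so parameters with those names shadow the builtins inside the function body. A tiny hypothetical illustration:

```go
package main

import "fmt"

// With the renamed parameters the predeclared min and max stay usable;
// had they still been called min and max, the calls below would try to
// invoke int values and fail to compile.
func clampLen(v, minSize, maxSize int) int {
	return max(minSize, min(v, maxSize))
}

func main() {
	fmt.Println(clampLen(7, 2, 5)) // prints 5
}
```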
t.Run("missing object", func(t *testing.T) { gPrm := common.GetPrm{Address: oidtest.Address()} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go index 93de683c2..13032048c 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go @@ -13,13 +13,13 @@ import ( "github.com/stretchr/testify/require" ) -func TestGetRange(t *testing.T, cons Constructor, min, max uint64) { +func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() - objects := prepare(t, 1, s, min, max) + objects := prepare(t, 1, s, minSize, maxSize) t.Run("missing object", func(t *testing.T) { gPrm := common.GetRangePrm{Address: oidtest.Address()} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index e66fe87b6..d54c54f59 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -3,6 +3,7 @@ package blobstortest import ( "context" "errors" + "slices" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -10,13 +11,13 @@ import ( "github.com/stretchr/testify/require" ) -func TestIterate(t *testing.T, cons Constructor, min, max uint64) { +func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() - objects := prepare(t, 10, s, min, max) + objects := prepare(t, 10, s, minSize, maxSize) // Delete random object to ensure it is not iterated over. const delID = 2 @@ -26,7 +27,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) { _, err := s.Delete(context.Background(), delPrm) require.NoError(t, err) - objects = append(objects[:delID], objects[delID+1:]...) 
+ objects = slices.Delete(objects, delID, delID+1) runTestNormalHandler(t, s, objects) @@ -49,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) _, err := s.Iterate(context.Background(), iterPrm) require.NoError(t, err) - require.Equal(t, len(objects), len(seen)) + require.Len(t, objects, len(seen)) for i := range objects { d, ok := seen[objects[i].addr.String()] require.True(t, ok) diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go index f213d7547..ff1aa9d64 100644 --- a/pkg/local_object_storage/blobstor/iterate.go +++ b/pkg/local_object_storage/blobstor/iterate.go @@ -42,10 +42,10 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I _, err := b.storage[i].Storage.Iterate(ctx, prm) if err != nil { if prm.IgnoreErrors { - b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.String("storage_path", b.storage[i].Storage.Path()), zap.String("storage_type", b.storage[i].Storage.Type()), - zap.String("err", err.Error())) + zap.Error(err)) continue } return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index 079728380..2786321a8 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -3,10 +3,14 @@ package blobstor import ( "context" "encoding/binary" + "errors" "os" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -21,7 +25,9 @@ func TestIterateObjects(t *testing.T) { // create BlobStor instance blobStor := New( WithStorages(defaultStorages(p, smalSz)), - WithCompressObjects(true), + WithCompression(compression.Config{ + Enabled: true, + }), ) defer os.RemoveAll(p) @@ -30,9 +36,9 @@ func TestIterateObjects(t *testing.T) { require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) // initialize Blobstor - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) - defer blobStor.Close() + defer blobStor.Close(context.Background()) const objNum = 5 @@ -44,7 +50,7 @@ func TestIterateObjects(t *testing.T) { mObjs := make(map[string]addrData) - for i := uint64(0); i < objNum; i++ { + for i := range uint64(objNum) { sz := smalSz big := i < objNum/2 @@ -90,117 +96,60 @@ func TestIterateObjects(t *testing.T) { } func TestIterate_IgnoreErrors(t *testing.T) { - t.Skip() - // dir := t.TempDir() - // - // const ( - // smallSize = 512 - // objCount = 5 - // ) - // bsOpts := []Option{ - // WithCompressObjects(true), - // WithRootPath(dir), - // WithSmallSizeLimit(smallSize * 2), // + header - // WithBlobovniczaOpenedCacheSize(1), - // WithBlobovniczaShallowWidth(1), - // WithBlobovniczaShallowDepth(1)} - // bs := New(bsOpts...) 
- // require.NoError(t, bs.Open(false)) - // require.NoError(t, bs.Init()) - // - // addrs := make([]oid.Address, objCount) - // for i := range addrs { - // addrs[i] = oidtest.Address() - // - // obj := object.New() - // obj.SetContainerID(addrs[i].Container()) - // obj.SetID(addrs[i].Object()) - // obj.SetPayload(make([]byte, smallSize<<(i%2))) - // - // objData, err := obj.Marshal() - // require.NoError(t, err) - // - // _, err = bs.PutRaw(addrs[i], objData, true) - // require.NoError(t, err) - // } - // - // // Construct corrupted compressed object. - // buf := bytes.NewBuffer(nil) - // badObject := make([]byte, smallSize/2+1) - // enc, err := zstd.NewWriter(buf) - // require.NoError(t, err) - // rawData := enc.EncodeAll(badObject, nil) - // for i := 4; /* magic size */ i < len(rawData); i += 2 { - // rawData[i] ^= 0xFF - // } - // // Will be put uncompressed but fetched as compressed because of magic. - // _, err = bs.PutRaw(oidtest.Address(), rawData, false) - // require.NoError(t, err) - // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData)) - // - // require.NoError(t, bs.Close()) - // - // // Increase width to have blobovnicza which is definitely empty. - // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...) - // require.NoError(t, b.Open(false)) - // require.NoError(t, b.Init()) - // - // var p string - // for i := 0; i < 2; i++ { - // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10)) - // if _, ok := bs.blobovniczas.opened.Get(bp); !ok { - // p = bp - // break - // } - // } - // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache") - // require.NoError(t, os.Chmod(p, 0)) - // - // require.NoError(t, b.Close()) - // require.NoError(t, bs.Open(false)) - // require.NoError(t, bs.Init()) - // - // var prm IteratePrm - // prm.SetIterationHandler(func(e IterationElement) error { - // return nil - // }) - // _, err = bs.Iterate(prm) - // require.Error(t, err) - // - // prm.IgnoreErrors() - // - // t.Run("skip invalid objects", func(t *testing.T) { - // actual := make([]oid.Address, 0, len(addrs)) - // prm.SetIterationHandler(func(e IterationElement) error { - // obj := object.New() - // err := obj.Unmarshal(e.data) - // if err != nil { - // return err - // } - // - // var addr oid.Address - // cnr, _ := obj.ContainerID() - // addr.SetContainer(cnr) - // id, _ := obj.ID() - // addr.SetObject(id) - // actual = append(actual, addr) - // return nil - // }) - // - // _, err := bs.Iterate(prm) - // require.NoError(t, err) - // require.ElementsMatch(t, addrs, actual) - // }) - // t.Run("return errors from handler", func(t *testing.T) { - // n := 0 - // expectedErr := errors.New("expected error") - // prm.SetIterationHandler(func(e IterationElement) error { - // if n++; n == objCount/2 { - // return expectedErr - // } - // return nil - // }) - // _, err := bs.Iterate(prm) - // require.ErrorIs(t, err, expectedErr) - // }) + ctx := context.Background() + + myErr := errors.New("unique error") + nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil } + panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") } + errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr } + + var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error) + st1 := teststore.New( + teststore.WithSubstorage(memstore.New()), + teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) { + return s1iter(prm) + })) + 
st2 := teststore.New( + teststore.WithSubstorage(memstore.New()), + teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) { + return s2iter(prm) + })) + + bsOpts := []Option{WithStorages([]SubStorage{ + {Storage: st1}, + {Storage: st2}, + })} + bs := New(bsOpts...) + require.NoError(t, bs.Open(ctx, mode.ReadWrite)) + require.NoError(t, bs.Init(ctx)) + + nopHandler := func(e common.IterationElement) error { + return nil + } + + t.Run("no errors", func(t *testing.T) { + s1iter = nopIter + s2iter = nopIter + _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler}) + require.NoError(t, err) + }) + t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) { + s1iter = errIter + s2iter = panicIter + _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler}) + require.ErrorIs(t, err, myErr) + }) + + t.Run("ignore errors, storage 1", func(t *testing.T) { + s1iter = errIter + s2iter = nopIter + _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler}) + require.NoError(t, err) + }) + t.Run("ignore errors, storage 2", func(t *testing.T) { + s1iter = nopIter + s2iter = errIter + _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler}) + require.NoError(t, err) + }) } diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go index 7e057a0e3..070b1eac9 100644 --- a/pkg/local_object_storage/blobstor/logger.go +++ b/pkg/local_object_storage/blobstor/logger.go @@ -1,6 +1,8 @@ package blobstor import ( + "context" + storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -11,8 +13,8 @@ const ( putOp = "PUT" ) -func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) { - storagelog.Write(l, +func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) { + storagelog.Write(ctx, l, storagelog.AddressField(addr), storagelog.OpField(op), storagelog.StorageTypeField(typ), diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 449d4352a..3df96a1c3 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -1,6 +1,8 @@ package memstore import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -10,11 +12,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error { return nil } -func (s *memstoreImpl) Init() error { return nil } -func (s *memstoreImpl) Close() error { return nil } -func (s *memstoreImpl) Type() string { return Type } -func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } -func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f } -func (s *memstoreImpl) SetParentID(string) {} +func (s *memstoreImpl) Init() error { return nil } +func (s *memstoreImpl) Close(context.Context) error { return nil } +func (s *memstoreImpl) Type() string { return Type } +func (s *memstoreImpl) Path() string { return s.rootPath } +func (s 
*memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression } +func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} +func (s *memstoreImpl) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go index 0252c7983..7ef7e37a4 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore.go @@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes, // Decompress the data. var err error if data, err = s.compression.Decompress(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRes{}, fmt.Errorf("decompress object data: %w", err) } // Unmarshal the SDK object. obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err) } return common.GetRes{Object: obj, RawData: data}, nil @@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common elem := common.IterationElement{ ObjectData: v, } - if err := elem.Address.DecodeString(string(k)); err != nil { + if err := elem.Address.DecodeString(k); err != nil { if req.IgnoreErrors { continue } - return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err)) + return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err)) } var err error if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil { diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go index 8d1480dff..f904d4232 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/stretchr/testify/require" ) @@ -16,9 +15,8 @@ import ( func TestSimpleLifecycle(t *testing.T) { s := New( WithRootPath("memstore"), - WithLogger(test.NewLogger(t)), ) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go index 3d67b1e9c..7605af4e5 100644 --- a/pkg/local_object_storage/blobstor/memstore/option.go +++ b/pkg/local_object_storage/blobstor/memstore/option.go @@ -2,33 +2,20 @@ package memstore import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) type cfg struct { - log *logger.Logger rootPath string readOnly bool - compression 
*compression.Config - reportError func(string, error) + compression *compression.Compressor } func defaultConfig() *cfg { - return &cfg{ - log: &logger.Logger{Logger: zap.L()}, - reportError: func(string, error) {}, - } + return &cfg{} } type Option func(*cfg) -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = l - } -} - func WithRootPath(p string) Option { return func(c *cfg) { c.rootPath = p diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go index a579a6f92..80268fa7a 100644 --- a/pkg/local_object_storage/blobstor/mode.go +++ b/pkg/local_object_storage/blobstor/mode.go @@ -8,7 +8,7 @@ import ( ) // SetMode sets the blobstor mode of operation. -func (b *BlobStor) SetMode(m mode.Mode) error { +func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error { b.modeMtx.Lock() defer b.modeMtx.Unlock() @@ -20,14 +20,14 @@ func (b *BlobStor) SetMode(m mode.Mode) error { return nil } - err := b.Close() + err := b.Close(ctx) if err == nil { - if err = b.openBlobStor(context.TODO(), m); err == nil { - err = b.Init() + if err = b.openBlobStor(ctx, m); err == nil { + err = b.Init(ctx) } } if err != nil { - return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err) + return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err) } b.mode = m diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index 501c95a1d..64e3c8da1 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -106,11 +106,11 @@ func BenchmarkSubstorageReadPerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) { objGen := tt.objGen() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() // Fill database var errG errgroup.Group - for i := 0; i < tt.size; i++ { + for range tt.size { obj := objGen.Next() addr := testutil.AddressFromObject(b, obj) errG.Go(func() error { @@ -161,7 +161,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) { gen := genEntry.create() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { @@ -200,10 +200,10 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) { objGen := tt.objGen() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() // Fill database - for i := 0; i < tt.size; i++ { + for range tt.size { obj := objGen.Next() addr := testutil.AddressFromObject(b, obj) raw, err := obj.Marshal() diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go index 1adae303d..fe9c109dd 100644 --- a/pkg/local_object_storage/blobstor/put.go +++ b/pkg/local_object_storage/blobstor/put.go @@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e // marshal object data, err := prm.Object.Marshal() if err != nil { - return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err) + return common.PutRes{}, fmt.Errorf("marshal the object: %w", err) } prm.RawData = data } @@ -63,7 +63,7 @@ func (b *BlobStor) 
Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e res, err := b.storage[i].Storage.Put(ctx, prm) if err == nil { success = true - logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID) + logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID) } return res, err } diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go index 101c60752..f28816555 100644 --- a/pkg/local_object_storage/blobstor/rebuild.go +++ b/pkg/local_object_storage/blobstor/rebuild.go @@ -13,23 +13,19 @@ type StorageIDUpdate interface { UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error } -type ConcurrentWorkersLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() -} - -func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter) error { +func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error { var summary common.RebuildRes var rErr error for _, storage := range b.storage { res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{ - MetaStorage: upd, - WorkerLimiter: limiter, + MetaStorage: upd, + Limiter: concLimiter, + FillPercent: fillPercent, }) summary.FilesRemoved += res.FilesRemoved summary.ObjectsMoved += res.ObjectsMoved if err != nil { - b.log.Error(logs.BlobstorRebuildFailedToRebuildStorages, + b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages, zap.String("failed_storage_path", storage.Storage.Path()), zap.String("failed_storage_type", storage.Storage.Type()), zap.Error(err)) @@ -37,7 +33,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con break } } - b.log.Info(logs.BlobstorRebuildRebuildStoragesCompleted, + b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted, zap.Bool("success", rErr == nil), zap.Uint64("total_files_removed", summary.FilesRemoved), zap.Uint64("total_objects_moved", summary.ObjectsMoved)) diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go index bc0bed49d..3a38ecf82 100644 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ b/pkg/local_object_storage/blobstor/teststore/option.go @@ -1,6 +1,8 @@ package teststore import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -15,9 +17,9 @@ type cfg struct { Type func() string Path func() string - SetCompressor func(cc *compression.Config) - Compressor func() *compression.Config - SetReportErrorFunc func(f func(string, error)) + SetCompressor func(cc *compression.Compressor) + Compressor func() *compression.Compressor + SetReportErrorFunc func(f func(context.Context, string, error)) Get func(common.GetPrm) (common.GetRes, error) GetRange func(common.GetRangePrm) (common.GetRangeRes, error) @@ -43,15 +45,15 @@ func WithClose(f func() error) Option { return func(c *cfg) { c func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } } func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } } -func WithSetCompressor(f func(*compression.Config)) Option { +func WithSetCompressor(f func(*compression.Compressor)) Option { return func(c *cfg) { c.overrides.SetCompressor = 
f } } -func WithCompressor(f func() *compression.Config) Option { +func WithCompressor(f func() *compression.Compressor) Option { return func(c *cfg) { c.overrides.Compressor = f } } -func WithReportErrorFunc(f func(func(string, error))) Option { +func WithReportErrorFunc(f func(func(context.Context, string, error))) Option { return func(c *cfg) { c.overrides.SetReportErrorFunc = f } } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index fea4a2d49..190b6a876 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -77,14 +77,14 @@ func (s *TestStore) Init() error { } } -func (s *TestStore) Close() error { +func (s *TestStore) Close(ctx context.Context) error { s.mu.RLock() defer s.mu.RUnlock() switch { case s.overrides.Close != nil: return s.overrides.Close() case s.st != nil: - return s.st.Close() + return s.st.Close(ctx) default: panic("unexpected storage call: Close()") } @@ -116,7 +116,7 @@ func (s *TestStore) Path() string { } } -func (s *TestStore) SetCompressor(cc *compression.Config) { +func (s *TestStore) SetCompressor(cc *compression.Compressor) { s.mu.RLock() defer s.mu.RUnlock() switch { @@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Config) { } } -func (s *TestStore) Compressor() *compression.Config { +func (s *TestStore) Compressor() *compression.Compressor { s.mu.RLock() defer s.mu.RUnlock() switch { @@ -142,7 +142,7 @@ func (s *TestStore) Compressor() *compression.Config { } } -func (s *TestStore) SetReportErrorFunc(f func(string, error)) { +func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) { s.mu.RLock() defer s.mu.RUnlock() switch { diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index e45f502ac..e0617a832 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -44,22 +44,25 @@ func (r ListContainersRes) Containers() []cid.ID { // ContainerSize returns the sum of estimation container sizes among all shards. // // Returns an error if executions are blocked (see BlockExecution). -func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { +func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { + defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() + err = e.execIfNotBlocked(func() error { - res, err = e.containerSize(prm) - return err + var csErr error + res, csErr = e.containerSize(ctx, prm) + return csErr }) return } // ContainerSize calls ContainerSize method on engine to calculate sum of estimation container sizes among all shards. 
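The teststore hunks above keep the package's override-or-delegate pattern while retyping the hooks (compression.Config becomes compression.Compressor, and the error-reporting callback gains a context.Context). A self-contained sketch of that pattern, with illustrative names rather than the real teststore types:

```go
package main

import (
	"fmt"
	"sync"
)

// realStorage stands in for the wrapped substorage.
type realStorage struct{}

func (realStorage) Path() string { return "/srv/data" }

// mockStorage mirrors the teststore idea: a test-supplied override is
// consulted first, then the call is delegated to the real storage, and
// any call the test did not declare panics loudly.
type mockStorage struct {
	mu           sync.RWMutex
	pathOverride func() string
	st           *realStorage
}

func (s *mockStorage) Path() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	switch {
	case s.pathOverride != nil:
		return s.pathOverride() // behaviour injected by the test wins
	case s.st != nil:
		return s.st.Path() // otherwise delegate to the wrapped storage
	default:
		panic("unexpected storage call: Path()")
	}
}

func main() {
	m := &mockStorage{pathOverride: func() string { return "/tmp/fake" }}
	fmt.Println(m.Path()) // prints /tmp/fake
}
```

The panic-on-undeclared-call default is what makes the TestStore hunks above safe to extend: a new storage method cannot be exercised silently by a test that never opted into it.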
-func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { +func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) { var prm ContainerSizePrm prm.SetContainerID(id) - res, err := e.ContainerSize(prm) + res, err := e.ContainerSize(ctx, prm) if err != nil { return 0, err } @@ -67,18 +70,15 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { return res.Size(), nil } -func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { - if e.metrics != nil { - defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)() - } - - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { + var res ContainerSizeRes + err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) - csRes, err := sh.Shard.ContainerSize(csPrm) + csRes, err := sh.ContainerSize(ctx, csPrm) if err != nil { - e.reportShardError(sh, "can't get container size", err, + e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) return false } @@ -88,16 +88,19 @@ func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRe return false }) - return + return res, err } // ListContainers returns the unique container IDs present in the engine's objects. // // Returns an error if executions are blocked (see BlockExecution). func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) (res ListContainersRes, err error) { + defer elapsed("ListContainers", e.metrics.AddMethodDuration)() + err = e.execIfNotBlocked(func() error { - res, err = e.listContainers(ctx) - return err + var lcErr error + res, lcErr = e.listContainers(ctx) + return lcErr }) return @@ -116,16 +119,12 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { } func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { - if e.metrics != nil { - defer elapsed("ListContainers", e.metrics.AddMethodDuration)() - } - uniqueIDs := make(map[string]cid.ID) - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{}) + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + res, err := sh.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { - e.reportShardError(sh, "can't get list of containers", err) + e.reportShardError(ctx, sh, "can't get list of containers", err) return false } @@ -137,7 +136,9 @@ func (e *StorageEngine) listContainers(ctx context.Context, } return false - }) + }); err != nil { + return ListContainersRes{}, err + } result := make([]cid.ID, 0, len(uniqueIDs)) for _, v := range uniqueIDs { diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 03196400a..bf1649f6e 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -22,10 +22,6 @@ type shardInitError struct { // Open opens all StorageEngine's components.
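The container.go rewrite above assumes shard iteration itself became context-aware and fallible: iterateOverUnsortedShards now takes ctx and returns an error, which containerSize and listContainers propagate. The helper's body is not part of this diff, so the following is an assumption-level sketch of the contract the new call sites rely on (stand-in types, engine locking elided):

```go
package main

import "context"

type hashedShard struct{ id string }

type StorageEngine struct {
	shards map[string]hashedShard
}

// iterateOverUnsortedShards checks the context before visiting each
// shard and surfaces its error; a handler returning true stops early.
func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error {
	for _, sh := range e.shards {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if handler(sh) {
			break
		}
	}
	return nil
}

func main() {
	e := &StorageEngine{shards: map[string]hashedShard{"a": {id: "a"}}}
	_ = e.iterateOverUnsortedShards(context.Background(), func(hashedShard) bool { return false })
}
```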
func (e *StorageEngine) Open(ctx context.Context) error { - return e.open(ctx) -} - -func (e *StorageEngine) open(ctx context.Context) error { e.mtx.Lock() defer e.mtx.Unlock() @@ -49,16 +45,16 @@ func (e *StorageEngine) open(ctx context.Context) error { for res := range errCh { if res.err != nil { - e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping, + e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping, zap.String("id", res.id), zap.Error(res.err)) sh := e.shards[res.id] delete(e.shards, res.id) - err := sh.Close() + err := sh.Close(ctx) if err != nil { - e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, + e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } @@ -77,13 +73,11 @@ func (e *StorageEngine) Init(ctx context.Context) error { errCh := make(chan shardInitError, len(e.shards)) var eg errgroup.Group - if e.cfg.lowMem && e.anyShardRequiresRefill() { + if e.lowMem && e.anyShardRequiresRefill() { eg.SetLimit(1) } for id, sh := range e.shards { - id := id - sh := sh eg.Go(func() error { if err := sh.Init(ctx); err != nil { errCh <- shardInitError{ @@ -97,29 +91,29 @@ func (e *StorageEngine) Init(ctx context.Context) error { err := eg.Wait() close(errCh) if err != nil { - return fmt.Errorf("failed to initialize shards: %w", err) + return fmt.Errorf("initialize shards: %w", err) } for res := range errCh { if res.err != nil { if errors.Is(res.err, blobstor.ErrInitBlobovniczas) { - e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping, + e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping, zap.String("id", res.id), zap.Error(res.err)) sh := e.shards[res.id] delete(e.shards, res.id) - err := sh.Close() + err := sh.Close(ctx) if err != nil { - e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, + e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } continue } - return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err) + return fmt.Errorf("initialize shard %s: %w", res.id, res.err) } } @@ -128,7 +122,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { } e.wg.Add(1) - go e.setModeLoop() + go e.setModeLoop(ctx) return nil } @@ -151,25 +145,19 @@ var errClosed = errors.New("storage engine is closed") func (e *StorageEngine) Close(ctx context.Context) error { close(e.closeCh) defer e.wg.Wait() - return e.setBlockExecErr(ctx, errClosed) + return e.closeEngine(ctx) } // closes all shards. Never returns an error, shard errors are logged. -func (e *StorageEngine) close(releasePools bool) error { +func (e *StorageEngine) closeAllShards(ctx context.Context) error { e.mtx.RLock() defer e.mtx.RUnlock() - if releasePools { - for _, p := range e.shardPools { - p.Release() - } - } - for id, sh := range e.shards { - if err := sh.Close(); err != nil { - e.log.Debug(logs.EngineCouldNotCloseShard, + if err := sh.Close(ctx); err != nil { + e.log.Debug(ctx, logs.EngineCouldNotCloseShard, zap.String("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -184,90 +172,29 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error { e.blockExec.mtx.RLock() defer e.blockExec.mtx.RUnlock() - if e.blockExec.err != nil { - return e.blockExec.err + if e.blockExec.closed { + return errClosed } return op() } -// sets the flag of blocking execution of all data operations according to err: -// - err != nil, then blocks the execution. 
If exec wasn't blocked, calls close method -// (if err == errClosed => additionally releases pools and does not allow to resume executions). -// - otherwise, resumes execution. If exec was blocked, calls open method. -// -// Can be called concurrently with exec. In this case it waits for all executions to complete. -func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error { +func (e *StorageEngine) closeEngine(ctx context.Context) error { e.blockExec.mtx.Lock() defer e.blockExec.mtx.Unlock() - prevErr := e.blockExec.err - - wasClosed := errors.Is(prevErr, errClosed) - if wasClosed { + if e.blockExec.closed { return errClosed } - e.blockExec.err = err - - if err == nil { - if prevErr != nil { // block -> ok - return e.open(ctx) - } - } else if prevErr == nil { // ok -> block - return e.close(errors.Is(err, errClosed)) - } - - // otherwise do nothing - - return nil -} - -// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err. -// To resume the execution, use ResumeExecution method. -// -// Can be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources -// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions -// to complete). Returns error if any Close has been called before. -// -// Must not be called concurrently with either Open or Init. -// -// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution -// for this. -func (e *StorageEngine) BlockExecution(err error) error { - return e.setBlockExecErr(context.Background(), err) -} - -// ResumeExecution resumes the execution of any data-related operation. -// To block the execution, use BlockExecution method. -// -// Can be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources -// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions -// to complete). Returns error if any Close has been called before. -// -// Must not be called concurrently with either Open or Init. -func (e *StorageEngine) ResumeExecution() error { - return e.setBlockExecErr(context.Background(), nil) + e.blockExec.closed = true + return e.closeAllShards(ctx) } type ReConfiguration struct { - errorsThreshold uint32 - shardPoolSize uint32 - shards map[string][]shard.Option // meta path -> shard opts } -// SetErrorsThreshold sets a size amount of errors after which -// shard is moved to read-only mode. -func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) { - rCfg.errorsThreshold = errorsThreshold -} - -// SetShardPoolSize sets a size of worker pool for each shard. -func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) { - rCfg.shardPoolSize = shardPoolSize -} - // AddShard adds a shard for the reconfiguration. // Shard identifier is calculated from paths used in blobstor. func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) { @@ -322,12 +249,12 @@ loop: e.mtx.RUnlock() - e.removeShards(shardsToRemove...) + e.removeShards(ctx, shardsToRemove...) for _, p := range shardsToReload { err := p.sh.Reload(ctx, p.opts...)
if err != nil { - e.log.Error(logs.EngineCouldNotReloadAShard, + e.log.Error(ctx, logs.EngineCouldNotReloadAShard, zap.Stringer("shard id", p.sh.ID()), zap.Error(err)) } @@ -336,7 +263,7 @@ loop: for _, newID := range shardsToAdd { sh, err := e.createShard(ctx, rcfg.shards[newID]) if err != nil { - return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err) + return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err) } idStr := sh.ID().String() @@ -346,17 +273,17 @@ loop: err = sh.Init(ctx) } if err != nil { - _ = sh.Close() - return fmt.Errorf("could not init %s shard: %w", idStr, err) + _ = sh.Close(ctx) + return fmt.Errorf("init %s shard: %w", idStr, err) } err = e.addShard(sh) if err != nil { - _ = sh.Close() - return fmt.Errorf("could not add %s shard: %w", idStr, err) + _ = sh.Close(ctx) + return fmt.Errorf("add %s shard: %w", idStr, err) } - e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr)) + e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr)) } return nil diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index f0809883c..4ff0ed5ec 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -2,7 +2,6 @@ package engine import ( "context" - "errors" "fmt" "io/fs" "os" @@ -12,17 +11,14 @@ import ( "testing" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" ) @@ -163,42 +159,6 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O require.Equal(t, 1, shardCount) } -func TestExecBlocks(t *testing.T) { - e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many - - // put some object - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - - addr := object.AddressOf(obj) - - require.NoError(t, Put(context.Background(), e, obj)) - - // block executions - errBlock := errors.New("block exec err") - - require.NoError(t, e.BlockExecution(errBlock)) - - // try to exec some op - _, err := Head(context.Background(), e, addr) - require.ErrorIs(t, err, errBlock) - - // resume executions - require.NoError(t, e.ResumeExecution()) - - _, err = Head(context.Background(), e, addr) // can be any data-related op - require.NoError(t, err) - - // close - require.NoError(t, e.Close(context.Background())) - - // try exec after close - _, err = Head(context.Background(), e, addr) - require.Error(t, err) - - // try to resume - require.Error(t, e.ResumeExecution()) -} - func TestPersistentShardID(t *testing.T) { dir := t.TempDir() @@ -208,7 +168,7 @@ func TestPersistentShardID(t 
*testing.T) { require.NoError(t, te.ng.Close(context.Background())) newTe := newEngineWithErrorThreshold(t, dir, 1) - for i := 0; i < len(newTe.shards); i++ { + for i := range len(newTe.shards) { require.Equal(t, te.shards[i].id, newTe.shards[i].id) } require.NoError(t, newTe.ng.Close(context.Background())) @@ -245,7 +205,6 @@ func TestReload(t *testing.T) { // no new paths => no new shards require.Equal(t, shardNum, len(e.shards)) - require.Equal(t, shardNum, len(e.shardPools)) newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum)) @@ -257,7 +216,6 @@ func TestReload(t *testing.T) { require.NoError(t, e.Reload(context.Background(), rcfg)) require.Equal(t, shardNum+1, len(e.shards)) - require.Equal(t, shardNum+1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -269,7 +227,7 @@ func TestReload(t *testing.T) { e, currShards := engineWithShards(t, removePath, shardNum) var rcfg ReConfiguration - for i := 0; i < len(currShards)-1; i++ { // without one of the shards + for i := range len(currShards) - 1 { // without one of the shards rcfg.AddShard(currShards[i], nil) } @@ -277,7 +235,6 @@ func TestReload(t *testing.T) { // removed one require.Equal(t, shardNum-1, len(e.shards)) - require.Equal(t, shardNum-1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -302,7 +259,8 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str meta.WithEpochState(epochState{}), ), } - }) + }). + prepare(t) e, ids := te.engine, te.shardIDs for _, id := range ids { @@ -310,10 +268,6 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str } require.Equal(t, num, len(e.shards)) - require.Equal(t, num, len(e.shardPools)) - - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) return e, currShards } diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 096528967..223cdbc48 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -24,9 +23,6 @@ type DeletePrm struct { forceRemoval bool } -// DeleteRes groups the resulting values of Delete operation. -type DeleteRes struct{} - // WithAddress is a Delete option to set the addresses of the objects to delete. // // Option is required. @@ -51,27 +47,21 @@ func (p *DeletePrm) WithForceRemoval() { // NOTE: Marks any object to be deleted (despite any prohibitions // on operations with that object) if WithForceRemoval option has // been provided. 
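The Delete rewrite below runs through the same execIfNotBlocked gate that control.go above reduced to a single boolean. Condensed into a runnable sketch (gate is an illustrative name), the locking discipline is: data operations share the read lock and fail fast once the engine is closed, while close takes the write lock and therefore waits out every in-flight operation:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosed = errors.New("storage engine is closed")

type gate struct {
	mtx    sync.RWMutex
	closed bool
}

// execIfNotBlocked is the read side: cheap, concurrent, fail-fast.
func (g *gate) execIfNotBlocked(op func() error) error {
	g.mtx.RLock()
	defer g.mtx.RUnlock()
	if g.closed {
		return errClosed
	}
	return op()
}

// close is the write side: it excludes all readers, so shutdown runs
// only after in-flight operations have finished; a second close is
// reported, not repeated.
func (g *gate) close(shutdown func() error) error {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	if g.closed {
		return errClosed
	}
	g.closed = true
	return shutdown()
}

func main() {
	var g gate
	_ = g.close(func() error { return nil })
	fmt.Println(g.execIfNotBlocked(func() error { return nil })) // storage engine is closed
}
```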
-func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) { +func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete", trace.WithAttributes( attribute.String("address", prm.addr.EncodeToString()), attribute.Bool("force_removal", prm.forceRemoval), )) defer span.End() + defer elapsed("Delete", e.metrics.AddMethodDuration)() - err = e.execIfNotBlocked(func() error { - res, err = e.delete(ctx, prm) - return err + return e.execIfNotBlocked(func() error { + return e.delete(ctx, prm) }) - - return } -func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { - if e.metrics != nil { - defer elapsed("Delete", e.metrics.AddMethodDuration)() - } - +func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { var locked struct { is bool } @@ -81,7 +71,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e // Removal of a big object is done in multiple stages: // 1. Remove the parent object. If it is locked or already removed, return immediately. // 2. Otherwise, search for all objects with a particular SplitID and delete them too. - e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { var existsPrm shard.ExistsPrm existsPrm.Address = prm.addr @@ -100,7 +90,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e return false } else { if !client.IsErrObjectNotFound(err) { - e.reportShardError(sh, "could not check object existence", err) + e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr)) } return false } @@ -116,7 +106,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e _, err = sh.Inhume(ctx, shPrm) if err != nil { - e.reportShardError(sh, "could not inhume object in shard", err) + e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr)) var target *apistatus.ObjectLocked locked.is = errors.As(err, &target) @@ -126,39 +116,40 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e // If a parent object is removed we should set GC mark on each shard. 
return splitInfo == nil - }) + }); err != nil { + return err + } if locked.is { - return DeleteRes{}, new(apistatus.ObjectLocked) + return new(apistatus.ObjectLocked) } if splitInfo != nil { - e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) + return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) } - return DeleteRes{}, nil + return nil } -func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) { +func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error { var fs objectSDK.SearchFilters fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID) var selectPrm shard.SelectPrm selectPrm.SetFilters(fs) - selectPrm.SetContainerID(addr.Container()) + selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID var inhumePrm shard.InhumePrm if force { inhumePrm.ForceRemoval() } - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Select(ctx, selectPrm) if err != nil { - e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren, + e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return false } @@ -167,10 +158,9 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo _, err = sh.Inhume(ctx, inhumePrm) if err != nil { - e.log.Debug(logs.EngineCouldNotInhumeObjectInShard, + e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.String("err", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } } @@ -191,16 +181,15 @@ func (e *StorageEngine) deleteChunks( var objID oid.ID err := objID.ReadFromV2(chunk.ID) if err != nil { - e.reportShardError(sh, "could not delete EC chunk", err) + e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr)) } addr.SetObject(objID) inhumePrm.MarkAsGarbage(addr) _, err = sh.Inhume(ctx, inhumePrm) if err != nil { - e.log.Debug(logs.EngineCouldNotInhumeObjectInShard, + e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.String("err", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } } diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index 4a6758012..a56598c09 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -49,18 +48,13 @@ func TestDeleteBigObject(t *testing.T) { link.SetSplitID(splitID) link.SetChildren(childIDs...) 
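The delete.go hunks above drop the hand-written zap.String("trace_id", tracingPkg.GetTraceID(ctx)) fields because the logger now receives ctx directly. One plausible shape for such a wrapper, offered as an assumption (the real one lives in pkg/util/logger and is not shown in this diff):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

type Logger struct{ z *zap.Logger }

// Warn extracts the trace ID from ctx once, centrally, so call sites
// no longer need to attach it as an explicit field.
func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
	if sc := trace.SpanContextFromContext(ctx); sc.HasTraceID() {
		fields = append(fields, zap.String("trace_id", sc.TraceID().String()))
	}
	l.z.Warn(msg, fields...)
}

func main() {
	l := Logger{z: zap.NewExample()}
	l.Warn(context.Background(), "could not inhume object in shard")
}
```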
- s1 := testNewShard(t) - s2 := testNewShard(t) - s3 := testNewShard(t) - - e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine - e.log = test.NewLogger(t) - defer e.Close(context.Background()) + e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine + defer func() { require.NoError(t, e.Close(context.Background())) }() for i := range children { - require.NoError(t, Put(context.Background(), e, children[i])) + require.NoError(t, Put(context.Background(), e, children[i], false)) } - require.NoError(t, Put(context.Background(), e, link)) + require.NoError(t, Put(context.Background(), e, link, false)) addrParent := object.AddressOf(parent) checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true) @@ -76,8 +70,7 @@ func TestDeleteBigObject(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - _, err := e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -119,16 +112,18 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { link.SetSplitID(splitID) link.SetChildren(childIDs...) - s1 := testNewShard(t, shard.WithDisabledGC()) + te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{shard.WithDisabledGC()} + }).prepare(t) + e := te.engine + defer func() { require.NoError(t, e.Close(context.Background())) }() - e := testNewEngine(t).setInitializedShards(t, s1).engine - e.log = test.NewLogger(t) - defer e.Close(context.Background()) + s1 := te.shards[0] for i := range children { - require.NoError(t, Put(context.Background(), e, children[i])) + require.NoError(t, Put(context.Background(), e, children[i], false)) } - require.NoError(t, Put(context.Background(), e, link)) + require.NoError(t, Put(context.Background(), e, link, false)) addrParent := object.AddressOf(parent) checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true) @@ -145,8 +140,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - _, err := e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -157,7 +151,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { // delete physical var delPrm shard.DeletePrm delPrm.SetAddresses(addrParent) - _, err = s1.Delete(context.Background(), delPrm) + _, err := s1.Delete(context.Background(), delPrm) require.NoError(t, err) delPrm.SetAddresses(addrLink) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index b87d77e6c..376d545d3 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -12,8 +12,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) @@ 
-28,19 +28,15 @@ type StorageEngine struct { shards map[string]hashedShard - shardPools map[string]util.WorkerPool - closeCh chan struct{} setModeCh chan setModeRequest wg sync.WaitGroup blockExec struct { - mtx sync.RWMutex - - err error + mtx sync.RWMutex + closed bool } evacuateLimiter *evacuationLimiter - rebuildLimiter *rebuildLimiter } type shardWrapper struct { @@ -56,7 +52,7 @@ type setModeRequest struct { // setModeLoop listens setModeCh to perform degraded mode transition of a single shard. // Instead of creating a worker per single shard we use a single goroutine. -func (e *StorageEngine) setModeLoop() { +func (e *StorageEngine) setModeLoop(ctx context.Context) { defer e.wg.Done() var ( @@ -76,7 +72,7 @@ func (e *StorageEngine) setModeLoop() { if !ok { inProgress[sid] = struct{}{} go func() { - e.moveToDegraded(r.sh, r.errorCount, r.isMeta) + e.moveToDegraded(ctx, r.sh, r.errorCount, r.isMeta) mtx.Lock() delete(inProgress, sid) @@ -88,7 +84,7 @@ func (e *StorageEngine) setModeLoop() { } } -func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta bool) { +func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, errCount uint32, isMeta bool) { sid := sh.ID() log := e.log.With( zap.Stringer("shard_id", sid), @@ -98,28 +94,26 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta defer e.mtx.RUnlock() if isMeta { - err := sh.SetMode(mode.DegradedReadOnly) + err := sh.SetMode(ctx, mode.DegradedReadOnly) if err == nil { - log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) + log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) return } - log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, + log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, zap.Error(err)) } - err := sh.SetMode(mode.ReadOnly) + err := sh.SetMode(ctx, mode.ReadOnly) if err != nil { - log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) + log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) return } - log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) + log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) } -// reportShardErrorBackground increases shard error counter and logs an error. -// It is intended to be used from background workers and -// doesn't change shard mode because of possible deadlocks. -func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) { +// reportShardErrorByID increases shard error counter and logs an error. +func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) { e.mtx.RLock() sh, ok := e.shards[id] e.mtx.RUnlock() @@ -128,50 +122,33 @@ func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err er return } - if isLogical(err) { - e.log.Warn(msg, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error())) - return - } - - errCount := sh.errorCount.Add(1) - sh.Shard.IncErrorCounter() - e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err) + e.reportShardError(ctx, sh, msg, err) } // reportShardError checks that the amount of errors doesn't exceed the configured threshold. // If it does, shard is set to read-only mode. 
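moveToDegraded above encodes a two-step fallback. Stripped of logging, the control flow is the following (degrade is an illustrative name; shard.Shard, mode.DegradedReadOnly and the SetMode(ctx, m) signature are exactly as in this diff):

```go
// For metabase-related failures the shard first tries degraded
// read-only, where the metabase is bypassed but reads keep working;
// if that transition fails, or the failure was not meta-related, it
// falls back to plain read-only. The real code logs each outcome.
func degrade(ctx context.Context, sh *shard.Shard, isMeta bool) {
	if isMeta {
		if err := sh.SetMode(ctx, mode.DegradedReadOnly); err == nil {
			return
		}
	}
	_ = sh.SetMode(ctx, mode.ReadOnly) // best-effort fallback
}
```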
func (e *StorageEngine) reportShardError( + ctx context.Context, sh hashedShard, msg string, err error, fields ...zap.Field, ) { if isLogical(err) { - e.log.Warn(msg, + e.log.Warn(ctx, msg, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error())) + zap.Error(err)) return } errCount := sh.errorCount.Add(1) - sh.Shard.IncErrorCounter() - e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err, fields...) -} + e.metrics.IncErrorCounter(sh.ID().String()) -func (e *StorageEngine) reportShardErrorWithFlags( - sh *shard.Shard, - errCount uint32, - msg string, - err error, - fields ...zap.Field, -) { sid := sh.ID() - e.log.Warn(msg, append([]zap.Field{ + e.log.Warn(ctx, msg, append([]zap.Field{ zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), - zap.String("error", err.Error()), + zap.Error(err), }, fields...)...) if e.errorsThreshold == 0 || errCount < e.errorsThreshold { @@ -180,7 +157,7 @@ func (e *StorageEngine) reportShardErrorWithFlags( req := setModeRequest{ errorCount: errCount, - sh: sh, + sh: sh.Shard, isMeta: errors.As(err, new(metaerr.Error)), } @@ -189,14 +166,17 @@ func (e *StorageEngine) reportShardErrorWithFlags( default: // For background workers we can have a lot of such errors, // thus logging is done with DEBUG level. - e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, + e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, zap.Stringer("shard_id", sid), zap.Uint32("error_count", errCount)) } } func isLogical(err error) bool { - return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) + return errors.As(err, &logicerr.Logical{}) || + errors.Is(err, context.Canceled) || + errors.Is(err, context.DeadlineExceeded) || + errors.As(err, new(*apistatus.ResourceExhausted)) } // Option represents StorageEngine's constructor option. @@ -209,20 +189,15 @@ type cfg struct { metrics MetricRegister - shardPoolSize uint32 - lowMem bool - rebuildWorkersCount uint32 - containerSource atomic.Pointer[containerSource] } func defaultCfg() *cfg { res := &cfg{ - log: &logger.Logger{Logger: zap.L()}, - shardPoolSize: 20, - rebuildWorkersCount: 100, + log: logger.NewLoggerWrapper(zap.L()), + metrics: noopMetrics{}, } res.containerSource.Store(&containerSource{}) return res @@ -236,14 +211,18 @@ func New(opts ...Option) *StorageEngine { opts[i](c) } + evLimMtx := &sync.RWMutex{} + evLimCond := sync.NewCond(evLimMtx) + return &StorageEngine{ - cfg: c, - shards: make(map[string]hashedShard), - shardPools: make(map[string]util.WorkerPool), - closeCh: make(chan struct{}), - setModeCh: make(chan setModeRequest), - evacuateLimiter: &evacuationLimiter{}, - rebuildLimiter: newRebuildLimiter(c.rebuildWorkersCount), + cfg: c, + shards: make(map[string]hashedShard), + closeCh: make(chan struct{}), + setModeCh: make(chan setModeRequest), + evacuateLimiter: &evacuationLimiter{ + guard: evLimMtx, + statusCond: evLimCond, + }, } } @@ -260,13 +239,6 @@ func WithMetrics(v MetricRegister) Option { } } -// WithShardPoolSize returns option to specify size of worker pool for each shard. -func WithShardPoolSize(sz uint32) Option { - return func(c *cfg) { - c.shardPoolSize = sz - } -} - // WithErrorThreshold returns an option to specify size amount of errors after which // shard is moved to read-only mode. 
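reportShardError and isLogical above jointly define the accounting policy. A condensed sketch (account is an illustrative name; every field it touches is visible in this diff):

```go
// Logical errors (context cancellation, deadline expiry and, new in
// this diff, resource exhaustion) are logged but never counted.
// Storage errors bump the per-shard atomic counter; crossing the
// threshold emits a non-blocking mode-change request, so a transition
// already in progress never stalls the request path.
func (e *StorageEngine) account(ctx context.Context, sh hashedShard, err error) {
	if isLogical(err) {
		return
	}
	n := sh.errorCount.Add(1)
	e.metrics.IncErrorCounter(sh.ID().String())
	if e.errorsThreshold == 0 || n < e.errorsThreshold {
		return
	}
	select {
	case e.setModeCh <- setModeRequest{sh: sh.Shard, errorCount: n}: // isMeta elided
	default: // setModeLoop is busy; drop the duplicate request
	}
}
```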
func WithErrorThreshold(sz uint32) Option { @@ -282,13 +254,6 @@ func WithLowMemoryConsumption(lowMemCons bool) Option { } } -// WithRebuildWorkersCount returns an option to set the count of concurrent rebuild workers. -func WithRebuildWorkersCount(count uint32) Option { - return func(c *cfg) { - c.rebuildWorkersCount = count - } -} - // SetContainerSource sets container source. func (e *StorageEngine) SetContainerSource(cs container.Source) { e.containerSource.Store(&containerSource{cs: cs}) @@ -309,7 +274,7 @@ func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) ( return true, nil } - wasRemoved, err := container.WasRemoved(s.cs, id) + wasRemoved, err := container.WasRemoved(ctx, s.cs, id) if err != nil { return false, err } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 49976abbb..fc6d9ee9c 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -2,141 +2,111 @@ package engine import ( "context" + "fmt" "path/filepath" - "sync/atomic" + "runtime/debug" + "strings" + "sync" "testing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "git.frostfs.info/TrueCloudLab/hrw" - "github.com/panjf2000/ants/v2" "github.com/stretchr/testify/require" ) -type epochState struct{} +type epochState struct { + currEpoch uint64 +} func (s epochState) CurrentEpoch() uint64 { - return 0 -} - -func BenchmarkExists(b *testing.B) { - b.Run("2 shards", func(b *testing.B) { - benchmarkExists(b, 2) - }) - b.Run("4 shards", func(b *testing.B) { - benchmarkExists(b, 4) - }) - b.Run("8 shards", func(b *testing.B) { - benchmarkExists(b, 8) - }) -} - -func benchmarkExists(b *testing.B, shardNum int) { - shards := make([]*shard.Shard, shardNum) - for i := 0; i < shardNum; i++ { - shards[i] = testNewShard(b) - } - - e := testNewEngine(b).setInitializedShards(b, shards...).engine - defer func() { require.NoError(b, e.Close(context.Background())) }() - - addr := oidtest.Address() - for i := 0; i < 100; i++ { - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - err := Put(context.Background(), e, obj) - if err != nil { - b.Fatal(err) - } - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - var shPrm shard.ExistsPrm - shPrm.Address = addr - shPrm.ParentAddress = oid.Address{} - ok, _, err := e.exists(context.Background(), shPrm) - if err != nil || ok { - b.Fatalf("%t %v", ok, err) - } - } + return s.currEpoch } type testEngineWrapper struct 
{ engine *StorageEngine + shards []*shard.Shard shardIDs []*shard.ID } func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper { - engine := New(WithLogger(test.NewLogger(t))) - for _, opt := range opts { - opt(engine.cfg) - } - return &testEngineWrapper{ - engine: engine, - } -} - -func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper { - for _, s := range shards { - pool, err := ants.NewPool(10, ants.WithNonblocking(true)) - require.NoError(t, err) - - te.engine.shards[s.ID().String()] = hashedShard{ - shardWrapper: shardWrapper{ - errorCount: new(atomic.Uint32), - Shard: s, - }, - hash: hrw.StringHash(s.ID().String()), - } - te.engine.shardPools[s.ID().String()] = pool - te.shardIDs = append(te.shardIDs, s.ID()) - } - return te + opts = append(testGetDefaultEngineOptions(t), opts...) + return &testEngineWrapper{engine: New(opts...)} } func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper { - shards := make([]*shard.Shard, 0, num) - - for i := 0; i < num; i++ { - shards = append(shards, testNewShard(t)) - } - - return te.setInitializedShards(t, shards...) + return te.setShardsNumOpts(t, num, func(_ int) []shard.Option { + return testGetDefaultShardOptions(t) + }) } -func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { - for i := 0; i < num; i++ { - opts := shardOpts(i) - id, err := te.engine.AddShard(context.Background(), opts...) +func (te *testEngineWrapper) setShardsNumOpts( + t testing.TB, num int, shardOpts func(id int) []shard.Option, +) *testEngineWrapper { + te.shards = make([]*shard.Shard, num) + te.shardIDs = make([]*shard.ID, num) + for i := range num { + shard, err := te.engine.createShard(context.Background(), shardOpts(i)) require.NoError(t, err) - te.shardIDs = append(te.shardIDs, id) + require.NoError(t, te.engine.addShard(shard)) + te.shards[i] = shard + te.shardIDs[i] = shard.ID() } + require.Len(t, te.engine.shards, num) return te } -func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { - for i := 0; i < num; i++ { - defaultOpts := testDefaultShardOptions(t) - opts := append(defaultOpts, shardOpts(i)...) - id, err := te.engine.AddShard(context.Background(), opts...) - require.NoError(t, err) - te.shardIDs = append(te.shardIDs, id) - } +func (te *testEngineWrapper) setShardsNumAdditionalOpts( + t testing.TB, num int, shardOpts func(id int) []shard.Option, +) *testEngineWrapper { + return te.setShardsNumOpts(t, num, func(id int) []shard.Option { + return append(testGetDefaultShardOptions(t), shardOpts(id)...) + }) +} + +// prepare calls Open and Init on the created engine. 
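A call-site view of the reworked builder, assembled from the updated tests in this diff (TestSomething is a placeholder name):

```go
func TestSomething(t *testing.T) {
	// construct, size, then prepare, which performs Open and Init
	te := testNewEngine(t).
		setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
			return []shard.Option{shard.WithDisabledGC()}
		}).
		prepare(t)
	e := te.engine
	defer func() { require.NoError(t, e.Close(context.Background())) }()

	// unlike the removed setInitializedShards, the builder also keeps
	// the created shards around for direct assertions
	s0 := te.shards[0]
	_ = s0
}
```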
+func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper { + require.NoError(t, te.engine.Open(context.Background())) + require.NoError(t, te.engine.Init(context.Background())) return te } +func testGetDefaultEngineOptions(t testing.TB) []Option { + return []Option{ + WithLogger(test.NewLogger(t)), + } +} + +func testGetDefaultShardOptions(t testing.TB) []shard.Option { + return []shard.Option{ + shard.WithLogger(test.NewLogger(t)), + shard.WithBlobStorOptions( + blobstor.WithStorages( + newStorages(t, t.TempDir(), 1<<20)), + blobstor.WithLogger(test.NewLogger(t)), + ), + shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), + shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...), + shard.WithLimiter(&testQoSLimiter{t: t}), + } +} + +func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option { + return []meta.Option{ + meta.WithPath(filepath.Join(t.TempDir(), "metabase")), + meta.WithPermissions(0o700), + meta.WithEpochState(epochState{}), + meta.WithLogger(test.NewLogger(t)), + } +} + func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage { return []blobstor.SubStorage{ { @@ -146,7 +116,8 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1), blobovniczatree.WithPermissions(0o700), - blobovniczatree.WithLogger(test.NewLogger(t))), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))), Policy: func(_ *objectSDK.Object, data []byte) bool { return uint64(len(data)) < smallSize }, @@ -187,33 +158,77 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes }, smallFileStorage, largeFileStorage } -func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard { - sid, err := generateShardID() - require.NoError(t, err) +var _ qos.Limiter = (*testQoSLimiter)(nil) - shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...) - s := shard.New(append(shardOpts, opts...)...) 
- - require.NoError(t, s.Open(context.Background())) - require.NoError(t, s.Init(context.Background())) - - return s +type testQoSLimiter struct { + t testing.TB + quard sync.Mutex + id int64 + readStacks map[int64][]byte + writeStacks map[int64][]byte } -func testDefaultShardOptions(t testing.TB) []shard.Option { - return []shard.Option{ - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions( - blobstor.WithStorages( - newStorages(t, t.TempDir(), 1<<20)), - blobstor.WithLogger(test.NewLogger(t)), - ), - shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - meta.WithLogger(test.NewLogger(t)), - ), +func (t *testQoSLimiter) SetMetrics(qos.Metrics) {} + +func (t *testQoSLimiter) Close() { + t.quard.Lock() + defer t.quard.Unlock() + + var sb strings.Builder + var seqN int + for _, stack := range t.readStacks { + seqN++ + sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack))) } + for _, stack := range t.writeStacks { + seqN++ + sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack))) + } + require.True(t.t, seqN == 0, sb.String()) } + +func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) { + t.quard.Lock() + defer t.quard.Unlock() + + stack := debug.Stack() + + t.id++ + id := t.id + + if t.readStacks == nil { + t.readStacks = make(map[int64][]byte) + } + t.readStacks[id] = stack + + return func() { + t.quard.Lock() + defer t.quard.Unlock() + + delete(t.readStacks, id) + }, nil +} + +func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) { + t.quard.Lock() + defer t.quard.Unlock() + + stack := debug.Stack() + + t.id++ + id := t.id + + if t.writeStacks == nil { + t.writeStacks = make(map[int64][]byte) + } + t.writeStacks[id] = stack + + return func() { + t.quard.Lock() + defer t.quard.Unlock() + + delete(t.writeStacks, id) + }, nil +} + +func (t *testQoSLimiter) SetParentID(string) {} diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index 535435ceb..57029dd5f 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -46,7 +46,6 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) var testShards [2]*testShard te := testNewEngine(t, - WithShardPoolSize(1), WithErrorThreshold(errThreshold), ). 
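testQoSLimiter above pins down the qos.Limiter contract: every slot handed out by ReadRequest or WriteRequest must be released exactly once, otherwise Close fails the test and prints the captured acquisition stacks. A caller-side sketch under that contract (readThroughLimiter is a hypothetical helper):

```go
func readThroughLimiter(ctx context.Context, lim qos.Limiter, read func() error) error {
	release, err := lim.ReadRequest(ctx)
	if err != nil {
		return err // backpressure or cancellation; nothing was acquired
	}
	defer release() // a leaked slot would surface in testQoSLimiter.Close
	return read()
}
```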
setShardsNumOpts(t, 2, func(id int) []shard.Option { @@ -67,10 +66,8 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))), pilorama.WithPerm(0o700)), } - }) + }).prepare(t) e := te.engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) for i, id := range te.shardIDs { testShards[i].id = id @@ -151,17 +148,17 @@ func TestErrorReporting(t *testing.T) { checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) } - for i := uint32(0); i < 2; i++ { + for i := range uint32(2) { _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)}) require.Error(t, err) checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly) checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) } - require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, false)) + require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, false)) checkShardState(t, te.ng, te.shards[0].id, errThreshold+1, mode.ReadWrite) - require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true)) + require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, true)) checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite) require.NoError(t, te.ng.Close(context.Background())) }) diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 04e427e49..c08dfbf03 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -4,19 +4,20 @@ import ( "context" "errors" "fmt" + "slices" "strings" "sync" "sync/atomic" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -24,6 +25,16 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +const ( + // containerWorkerCountDefault is a default value of the count of + // concurrent container evacuation workers. + containerWorkerCountDefault = 10 + // objectWorkerCountDefault is a default value of the count of + // concurrent object evacuation workers. 
+ objectWorkerCountDefault = 10 ) var ( @@ -44,9 +55,6 @@ func (s EvacuateScope) String() string { var sb strings.Builder first := true if s&EvacuateScopeObjects == EvacuateScopeObjects { - if !first { - sb.WriteString(";") - } sb.WriteString("objects") first = false } @@ -77,8 +85,11 @@ type EvacuateShardPrm struct { ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error) TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error) IgnoreErrors bool - Async bool Scope EvacuateScope + RepOneOnly bool + + ContainerWorkerCount uint32 + ObjectWorkerCount uint32 } // EvacuateShardRes represents result of the EvacuateShard operation. @@ -189,21 +200,14 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes { return res } -const defaultEvacuateBatchSize = 100 - -type pooledShard struct { - hashedShard - pool util.WorkerPool -} - var errMustHaveTwoShards = errors.New("must have at least 1 spare shard") // Evacuate moves data from one shard to the others. // The shard being moved must be in read-only mode. -func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) { +func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err() default: } @@ -215,7 +219,6 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate", trace.WithAttributes( attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("async", prm.Async), attribute.Bool("ignoreErrors", prm.IgnoreErrors), attribute.Stringer("scope", prm.Scope), )) @@ -223,7 +226,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev shards, err := e.getActualShards(shardIDs, prm) if err != nil { - return nil, err + return err } shardsToEvacuate := make(map[string]*shard.Shard) @@ -236,40 +239,36 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev } res := NewEvacuateShardRes() - ctx = ctxOrBackground(ctx, prm.Async) - eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res) + ctx = context.WithoutCancel(ctx) + eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res) if err != nil { - return nil, err + return err } + var mtx sync.RWMutex + copyShards := func() []hashedShard { + mtx.RLock() + defer mtx.RUnlock() + t := slices.Clone(shards) + return t + } eg.Go(func() error { - return e.evacuateShards(egCtx, shardIDs, prm, res, shards, shardsToEvacuate) + return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate) }) - if prm.Async { - return nil, nil - } - - return res, eg.Wait() -} - -func ctxOrBackground(ctx context.Context, background bool) context.Context { - if background { - return context.Background() - } - return ctx + return nil } func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { var err error ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards", trace.WithAttributes( attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("async", prm.Async), attribute.Bool("ignoreErrors", prm.IgnoreErrors), attribute.Stringer("scope", prm.Scope), + attribute.Bool("repOneOnly", prm.RepOneOnly), )) defer func() { @@ -277,25 +276,51 
@@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p e.evacuateLimiter.Complete(err) }() - e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, + zap.Stringer("scope", prm.Scope)) err = e.getTotals(ctx, prm, shardsToEvacuate, res) if err != nil { - e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, + zap.Stringer("scope", prm.Scope)) return err } - for _, shardID := range shardIDs { - if err = e.evacuateShard(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil { - e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) - return err + ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm) + continueLoop := true + for i := 0; continueLoop && i < len(shardIDs); i++ { + select { + case <-ctx.Done(): + continueLoop = false + default: + egShard.Go(func() error { + err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject) + if err != nil { + cancel(err) + } + return err + }) } } + err = egShard.Wait() + if err != nil { + err = fmt.Errorf("shard error: %w", err) + } + errContainer := egContainer.Wait() + errObject := egObject.Wait() + if errContainer != nil { + err = errors.Join(err, fmt.Errorf("container error: %w", errContainer)) + } + if errObject != nil { + err = errors.Join(err, fmt.Errorf("object error: %w", errObject)) + } + if err != nil { + e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, + zap.Stringer("scope", prm.Scope)) + return err + } - e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation, + e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, zap.Uint64("total_objects", res.ObjectsTotal()), @@ -309,6 +334,27 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p return nil } +func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) ( + context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group, +) { + operationCtx, cancel := context.WithCancelCause(ctx) + egObject, _ := errgroup.WithContext(operationCtx) + objectWorkerCount := prm.ObjectWorkerCount + if objectWorkerCount == 0 { + objectWorkerCount = objectWorkerCountDefault + } + egObject.SetLimit(int(objectWorkerCount)) + egContainer, _ := errgroup.WithContext(operationCtx) + containerWorkerCount := prm.ContainerWorkerCount + if containerWorkerCount == 0 { + containerWorkerCount = containerWorkerCountDefault + } + egContainer.SetLimit(int(containerWorkerCount)) + egShard, _ := errgroup.WithContext(operationCtx) + + return operationCtx, cancel, egShard, egContainer, egObject +} + func (e 
*StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals") defer span.End() @@ -335,8 +381,9 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha return nil } -func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, +func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, + egContainer *errgroup.Group, egObject *errgroup.Group, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard", trace.WithAttributes( @@ -345,11 +392,10 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E defer span.End() if prm.Scope.WithObjects() { - if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil { + if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil { return err } } - if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() { if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil { return err @@ -359,43 +405,84 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E return nil } -func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, +func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, + egContainer *errgroup.Group, egObject *errgroup.Group, ) error { - var listPrm shard.ListWithCursorPrm - listPrm.WithCount(defaultEvacuateBatchSize) - sh := shardsToEvacuate[shardID] - - var c *meta.Cursor - for { - listPrm.WithCursor(c) - - // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes - // because ListWithCursor works only with the metabase. 
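The three error groups wired up above (shard, container, object) share one cancel-cause context: the first failing worker cancels the whole operation, and evacuateShards merges the per-level results with errors.Join. Note also that Evacuate now detaches the job with context.WithoutCancel, so a caller's cancellation no longer aborts a running evacuation. A minimal, self-contained sketch of this pattern follows; every name in it is invented for illustration and none of it is taken verbatim from the patch:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func run(ctx context.Context) error {
	// One cancellable context is shared by every level, as in
	// createErrorGroupsForEvacuation above.
	opCtx, cancel := context.WithCancelCause(ctx)
	defer cancel(nil)

	egOuter, _ := errgroup.WithContext(opCtx)
	egInner, _ := errgroup.WithContext(opCtx)
	egInner.SetLimit(4) // bounded worker pool, like ObjectWorkerCount

	for _, task := range []string{"ok", "boom", "ok"} {
		egOuter.Go(func() error {
			select {
			case <-opCtx.Done():
				return context.Cause(opCtx) // report the root cause, not just "context canceled"
			default:
			}
			egInner.Go(func() error {
				if task == "boom" {
					err := errors.New("worker failed")
					cancel(err) // the first failure stops every level
					return err
				}
				return nil
			})
			return nil
		})
	}

	// Wait for all levels and merge their errors, as evacuateShards does.
	return errors.Join(egOuter.Wait(), egInner.Wait())
}

func main() {
	fmt.Println(run(context.Background()))
}

SetLimit is what turns an errgroup into a bounded worker pool, which is how the new ContainerWorkerCount and ObjectWorkerCount parameters take effect.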
- listRes, err := sh.ListWithCursor(ctx, listPrm) - if err != nil { - if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) { - break + var cntPrm shard.IterateOverContainersPrm + cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error { + select { + case <-ctx.Done(): + return context.Cause(ctx) + default: + } + egContainer.Go(func() error { + var skip bool + c, err := e.containerSource.Load().cs.Get(ctx, cnt) + if err != nil { + if client.IsErrContainerNotFound(err) { + skip = true + } else { + return err + } + } + if !skip && prm.RepOneOnly { + skip = e.isNotRepOne(c) + } + if skip { + countPrm := shard.CountAliveObjectsInContainerPrm{ + ObjectType: objType, + ContainerID: cnt, + } + count, err := sh.CountAliveObjectsInContainer(ctx, countPrm) + if err != nil { + return err + } + res.objSkipped.Add(count) + return nil + } + var objPrm shard.IterateOverObjectsInContainerPrm + objPrm.ObjectType = objType + objPrm.ContainerID = cnt + objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error { + select { + case <-ctx.Done(): + return context.Cause(ctx) + default: + } + egObject.Go(func() error { + err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value) + if err != nil { + cancel(err) + } + return err + }) + return nil + } + err = sh.IterateOverObjectsInContainer(ctx, objPrm) + if err != nil { + cancel(err) } - e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err - } - - if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, shardsToEvacuate); err != nil { - return err - } - - c = listRes.Cursor() + }) + return nil } - return nil + + sh.SetEvacuationInProgress(true) + err := sh.IterateOverContainers(ctx, cntPrm) + if err != nil { + cancel(err) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField) + } + return err } func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { sh := shardsToEvacuate[shardID] + shards := getShards() var listPrm pilorama.TreeListTreesPrm first := true @@ -422,7 +509,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, } func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees", trace.WithAttributes( @@ -442,39 +529,39 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree return err } if success { - e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedLocal, + e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID), - evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) 
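The rewritten evacuateShardObjects above walks containers first and objects second, instead of paging through a flat ListWithCursor stream. That ordering is what makes whole-container skips cheap: a container rejected by the RepOneOnly filter, or one that no longer exists, costs a single CountAliveObjectsInContainer call instead of a read per object. A toy model of that control flow, using hypothetical types and the standard library only:

package main

import "fmt"

type object struct{ cnr, id string }

// evacuate walks containers first, so per-container decisions (REP 1 check,
// deleted-container check) are made once, before any object is read.
func evacuate(byCnr map[string][]object, skip func(cnr string) bool) (moved, skipped int) {
	for cnr, objs := range byCnr {
		if skip(cnr) {
			skipped += len(objs) // count alive objects, do not read them
			continue
		}
		for range objs {
			moved++ // read and re-put each object here
		}
	}
	return moved, skipped
}

func main() {
	data := map[string][]object{
		"rep1-container": {{"rep1-container", "a"}, {"rep1-container", "b"}},
		"rep3-container": {{"rep3-container", "c"}},
	}
	m, s := evacuate(data, func(cnr string) bool { return cnr == "rep3-container" })
	fmt.Println(m, s) // 2 1
}

The engine version does the same thing with two nested error groups, so containers and objects are processed concurrently rather than sequentially as in this toy.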
+ evacuationOperationLogField) res.trEvacuated.Add(1) continue } moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm) if err != nil { - e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree, + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } if moved { - e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedRemote, + e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote, zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID), zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK), - evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) res.trEvacuated.Add(1) } else if prm.IgnoreErrors { res.trFailed.Add(1) - e.log.Warn(logs.EngineShardsEvacuationFailedToMoveTree, + e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } else { - e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree, + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID) } } @@ -483,14 +570,14 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) { if prm.TreeHandler == nil { - return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID()) + return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID()) } return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh) } func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) (bool, string, error) { target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate) if err != nil { @@ -560,15 +647,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar // findShardToEvacuateTree returns first shard according HRW or first shard with tree exists. 
func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, -) (pooledShard, bool, error) { + shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, +) (hashedShard, bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString())) - var result pooledShard + var result hashedShard var found bool for _, target := range shards { select { case <-ctx.Done(): - return pooledShard{}, false, ctx.Err() + return hashedShard{}, false, ctx.Err() default: } @@ -596,7 +683,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora return result, found, nil } -func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) { +func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) { e.mtx.RLock() defer e.mtx.RUnlock() @@ -626,83 +713,85 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) // We must have all shards, to have correct information about their // indexes in a sorted slice and set appropriate marks in the metabase. // Evacuated shard is skipped during put. - shards := make([]pooledShard, 0, len(e.shards)) + shards := make([]hashedShard, 0, len(e.shards)) for id := range e.shards { - shards = append(shards, pooledShard{ - hashedShard: hashedShard(e.shards[id]), - pool: e.shardPools[id], - }) + shards = append(shards, e.shards[id]) } return shards, nil } -func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, +func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, + getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, ) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects", - trace.WithAttributes( - attribute.Int("objects_count", len(toEvacuate)), - )) + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects") defer span.End() - for i := range toEvacuate { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - addr := toEvacuate[i].Address + select { + case <-ctx.Done(): + return context.Cause(ctx) + default: + } - var getPrm shard.GetPrm - getPrm.SetAddress(addr) + shards := getShards() + addr := objInfo.Address - getRes, err := sh.Get(ctx, getPrm) - if err != nil { - if prm.IgnoreErrors { - res.objFailed.Add(1) - continue - } - e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - return err - } + var getPrm shard.GetPrm + getPrm.SetAddress(addr) + getPrm.SkipEvacCheck(true) - evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, shardsToEvacuate, res) - if err != nil { - return err - } - - if evacuatedLocal { - continue - } - - if prm.ObjectsHandler == nil { - // Do not check ignoreErrors flag here because - // ignoring errors on put make this command kinda useless. 
- return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i]) - } - - moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) - if err != nil { - e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - return err - } - if moved { - res.objEvacuated.Add(1) - } else if prm.IgnoreErrors { + getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm) + if err != nil { + if prm.IgnoreErrors { res.objFailed.Add(1) - e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - } else { - return fmt.Errorf("object %s was not replicated", addr) + return nil } + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) + return err + } + + evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr) + if err != nil { + return err + } + + if evacuatedLocal { + return nil + } + + if prm.ObjectsHandler == nil { + // Do not check ignoreErrors flag here because + // ignoring errors on put make this command kinda useless. + return fmt.Errorf("%w: %s", errPutShard, objInfo) + } + + moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) + if err != nil { + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) + return err + } + if moved { + res.objEvacuated.Add(1) + } else if prm.IgnoreErrors { + res.objFailed.Add(1) + e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) + } else { + return fmt.Errorf("object %s was not replicated", addr) } return nil } +func (e *StorageEngine) isNotRepOne(c *container.Container) bool { + p := c.Value.PlacementPolicy() + for i := range p.NumberOfReplicas() { + if p.ReplicaDescriptor(i).NumberOfObjects() > 1 { + return true + } + } + return false +} + func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, + shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, ) (bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString())) for j := range shards { @@ -715,15 +804,14 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok { continue } - switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object).status { + switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status { case putToShardSuccess: res.objEvacuated.Add(1) - e.log.Debug(logs.EngineObjectIsMovedToAnotherShard, + e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard, zap.Stringer("from", sh.ID()), zap.Stringer("to", shards[j].ID()), zap.Stringer("addr", addr), - evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) return true, nil case putToShardExists, putToShardRemoved: res.objSkipped.Add(1) @@ -765,3 +853,11 @@ 
func (e *StorageEngine) ResetEvacuationStatus(ctx context.Context) error { return e.evacuateLimiter.ResetEvacuationStatus() } + +func (e *StorageEngine) ResetEvacuationStatusForShards() { + e.mtx.RLock() + defer e.mtx.RUnlock() + for _, sh := range e.shards { + sh.SetEvacuationInProgress(false) + } +} diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go index 1e6b9ccb1..b75e8686d 100644 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ b/pkg/local_object_storage/engine/evacuate_limiter.go @@ -3,6 +3,7 @@ package engine import ( "context" "fmt" + "slices" "sync" "time" @@ -94,8 +95,7 @@ func (s *EvacuationState) StartedAt() *time.Time { if s == nil { return nil } - defaultTime := time.Time{} - if s.startedAt == defaultTime { + if s.startedAt.IsZero() { return nil } return &s.startedAt @@ -105,8 +105,7 @@ func (s *EvacuationState) FinishedAt() *time.Time { if s == nil { return nil } - defaultTime := time.Time{} - if s.finishedAt == defaultTime { + if s.finishedAt.IsZero() { return nil } return &s.finishedAt @@ -123,8 +122,7 @@ func (s *EvacuationState) DeepCopy() *EvacuationState { if s == nil { return nil } - shardIDs := make([]string, len(s.shardIDs)) - copy(shardIDs, s.shardIDs) + shardIDs := slices.Clone(s.shardIDs) return &EvacuationState{ shardIDs: shardIDs, @@ -141,7 +139,8 @@ type evacuationLimiter struct { eg *errgroup.Group cancel context.CancelFunc - guard sync.RWMutex + guard *sync.RWMutex + statusCond *sync.Cond // used in unit tests } func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) { @@ -167,6 +166,7 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res startedAt: time.Now().UTC(), result: result, } + l.statusCond.Broadcast() return l.eg, egCtx, nil } @@ -182,6 +182,7 @@ func (l *evacuationLimiter) Complete(err error) { l.state.processState = EvacuateProcessStateCompleted l.state.errMessage = errMsq l.state.finishedAt = time.Now().UTC() + l.statusCond.Broadcast() l.eg = nil } @@ -216,6 +217,7 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error { l.state = EvacuationState{} l.eg = nil l.cancel = nil + l.statusCond.Broadcast() return nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 55268b549..f2ba7d994 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -6,9 +6,12 @@ import ( "fmt" "path/filepath" "strconv" + "sync" + "sync/atomic" "testing" "time" + coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" @@ -18,14 +21,38 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" 
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" ) +type containerStorage struct { + cntmap map[cid.ID]*container.Container + latency time.Duration +} + +func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) { + time.Sleep(cs.latency) + v, ok := cs.cntmap[id] + if !ok { + return nil, new(apistatus.ContainerNotFound) + } + coreCnt := coreContainer.Container{ + Value: *v, + } + return &coreCnt, nil +} + +func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { + return nil, nil +} + func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) { dir := t.TempDir() @@ -48,10 +75,9 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng pilorama.WithPerm(0o700), ), } - }) + }). + prepare(t) e, ids := te.engine, te.shardIDs - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) objects := make([]*objectSDK.Object, 0, objPerShard*len(ids)) treeID := "version" @@ -59,10 +85,15 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng {Key: pilorama.AttributeVersion, Value: []byte("XXX")}, {Key: pilorama.AttributeFilename, Value: []byte("file.txt")}, } - + cnrMap := make(map[cid.ID]*container.Container) for _, sh := range ids { - for i := 0; i < objPerShard; i++ { + for i := range objPerShard { + // Create dummy container + cnr1 := container.Container{} + cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i)) contID := cidtest.ID() + cnrMap[contID] = &cnr1 + obj := testutil.GenerateObjectWithCID(contID) objects = append(objects, obj) @@ -76,6 +107,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng require.NoError(t, err) } } + e.SetContainerSource(&containerStorage{cntmap: cnrMap}) return e, ids, objects } @@ -108,16 +140,17 @@ func TestEvacuateShardObjects(t *testing.T) { prm.Scope = EvacuateScopeObjects t.Run("must be read-only", func(t *testing.T) { - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.ErrorIs(t, err, ErrMustBeReadOnly) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) }) - require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly)) - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.NoError(t, err) - require.Equal(t, uint64(objPerShard), res.ObjectsEvacuated()) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated()) // We check that all objects are available both before and after shard removal. // First case is a real-world use-case. It ensures that an object can be put in presense @@ -125,34 +158,75 @@ func TestEvacuateShardObjects(t *testing.T) { // Second case ensures that all objects are indeed moved and available. checkHasObjects(t) + // Objects on evacuated shards should be logically unavailable, but persisted on disk. + // This is necessary to prevent removing it by policer in case of `REP 1` policy. 
+ for _, obj := range objects[len(objects)-objPerShard:] { + var prmGet shard.GetPrm + prmGet.SetAddress(objectCore.AddressOf(obj)) + _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet) + require.Error(t, err) + + prmGet.SkipEvacCheck(true) + _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet) + require.NoError(t, err) + + var prmHead shard.HeadPrm + prmHead.SetAddress(objectCore.AddressOf(obj)) + _, err = e.shards[evacuateShardID].Head(context.Background(), prmHead) + require.Error(t, err) + + var existsPrm shard.ExistsPrm + existsPrm.Address = objectCore.AddressOf(obj) + _, err = e.shards[evacuateShardID].Exists(context.Background(), existsPrm) + require.Error(t, err) + + var rngPrm shard.RngPrm + rngPrm.SetAddress(objectCore.AddressOf(obj)) + _, err = e.shards[evacuateShardID].GetRange(context.Background(), rngPrm) + require.Error(t, err) + } + // Calling it again is OK, but all objects are already moved, so no new PUTs should be done. - res, err = e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st = testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(0), st.ObjectsEvacuated()) checkHasObjects(t) e.mtx.Lock() delete(e.shards, evacuateShardID) - delete(e.shardPools, evacuateShardID) e.mtx.Unlock() checkHasObjects(t) } +func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState { + var st *EvacuationState + var err error + e.evacuateLimiter.waitForCompleted() + st, err = e.GetEvacuationState(context.Background()) + require.NoError(t, err) + require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) + return st +} + func TestEvacuateObjectsNetwork(t *testing.T) { t.Parallel() errReplication := errors.New("handler error") acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) { - var n uint64 + var n atomic.Uint64 + var mtx sync.Mutex return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { - if n == max { + mtx.Lock() + defer mtx.Unlock() + if n.Load() == max { return false, errReplication } - n++ + n.Add(1) for i := range objects { if addr == objectCore.AddressOf(objects[i]) { require.Equal(t, objects[i], obj) @@ -173,21 +247,21 @@ func TestEvacuateObjectsNetwork(t *testing.T) { evacuateShardID := ids[0].String() - require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[0:1] prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.ErrorIs(t, err, errMustHaveTwoShards) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) prm.ObjectsHandler = acceptOneOf(objects, 2) - res, err = e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, uint64(2), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, uint64(2), st.ObjectsEvacuated()) }) t.Run("multiple shards, evacuate one", func(t *testing.T) { t.Parallel() @@ -196,24 +270,26 @@ func TestEvacuateObjectsNetwork(t *testing.T) { require.NoError(t, 
e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[1:2] prm.ObjectsHandler = acceptOneOf(objects, 2) prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, uint64(2), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, uint64(2), st.ObjectsEvacuated()) t.Run("no errors", func(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, 3) - res, err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(3), st.ObjectsEvacuated()) }) }) t.Run("multiple shards, evacuate many", func(t *testing.T) { @@ -234,7 +310,7 @@ func TestEvacuateObjectsNetwork(t *testing.T) { } for i := range ids { - require.NoError(t, e.shards[ids[i].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly)) } var prm EvacuateShardPrm @@ -242,16 +318,18 @@ func TestEvacuateObjectsNetwork(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, totalCount-1) prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, totalCount-1, res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, totalCount-1, st.ObjectsEvacuated()) t.Run("no errors", func(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, totalCount) - res, err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, totalCount, res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, totalCount, st.ObjectsEvacuated()) }) }) } @@ -263,8 +341,8 @@ func TestEvacuateCancellation(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -281,9 +359,39 @@ func TestEvacuateCancellation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - res, err := e.Evacuate(ctx, prm) + err := e.Evacuate(ctx, prm) require.ErrorContains(t, err, "context canceled") - require.Equal(t, uint64(0), res.ObjectsEvacuated()) +} + +func TestEvacuateCancellationByError(t *testing.T) { + t.Parallel() + e, ids, _ := newEngineEvacuate(t, 2, 10) + defer func() { + require.NoError(t, 
e.Close(context.Background())) + }() + + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) + + var prm EvacuateShardPrm + prm.ShardID = ids[1:2] + var once atomic.Bool + prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) { + var err error + flag := true + if once.CompareAndSwap(false, true) { + err = errors.New("test error") + flag = false + } + return flag, err + } + prm.Scope = EvacuateScopeObjects + prm.ObjectWorkerCount = 2 + prm.ContainerWorkerCount = 2 + + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), "test error") } func TestEvacuateSingleProcess(t *testing.T) { @@ -292,11 +400,11 @@ func TestEvacuateSingleProcess(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -313,20 +421,19 @@ func TestEvacuateSingleProcess(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - res, err := e.Evacuate(egCtx, prm) - require.NoError(t, err, "first evacuation failed") - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") return nil }) eg.Go(func() error { <-running - res, err := e.Evacuate(egCtx, prm) - require.ErrorContains(t, err, "evacuate is already running for shard ids", "second evacuation not failed") - require.Equal(t, uint64(0), res.ObjectsEvacuated()) + require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed") close(blocker) return nil }) require.NoError(t, eg.Wait()) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, uint64(3), st.ObjectsEvacuated()) + require.Equal(t, st.ErrorMessage(), "") } func TestEvacuateObjectsAsync(t *testing.T) { @@ -335,11 +442,11 @@ func TestEvacuateObjectsAsync(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -365,9 +472,9 @@ func TestEvacuateObjectsAsync(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - res, err := e.Evacuate(egCtx, prm) - require.NoError(t, err, "first evacuation failed") - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid 
final count") return nil }) @@ -390,12 +497,7 @@ func TestEvacuateObjectsAsync(t *testing.T) { close(blocker) - require.Eventually(t, func() bool { - st, err = e.GetEvacuationState(context.Background()) - return st.ProcessingStatus() == EvacuateProcessStateCompleted - }, 3*time.Second, 10*time.Millisecond, "invalid final state") - - require.NoError(t, err, "get final state failed") + st = testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") require.NotNil(t, st.StartedAt(), "invalid final started at") require.NotNil(t, st.FinishedAt(), "invalid final finished at") @@ -421,7 +523,7 @@ func TestEvacuateTreesLocal(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[0:1] @@ -441,14 +543,9 @@ func TestEvacuateTreesLocal(t *testing.T) { require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - res, err := e.Evacuate(context.Background(), prm) - require.NotNil(t, res, "sync evacuation result must be not nil") - require.NoError(t, err, "evacuation failed") - - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get evacuation state failed") - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) + require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed") + st = testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count") require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count") require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count") @@ -500,9 +597,10 @@ func TestEvacuateTreesRemote(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) + mutex := sync.Mutex{} evacuatedTreeOps := make(map[string][]*pilorama.Move) var prm EvacuateShardPrm prm.ShardID = ids @@ -517,7 +615,9 @@ func TestEvacuateTreesRemote(t *testing.T) { if op.Time == 0 { return true, "", nil } + mutex.Lock() evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op) + mutex.Unlock() height = op.Time + 1 } } @@ -536,15 +636,9 @@ func TestEvacuateTreesRemote(t *testing.T) { require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - res, err := e.Evacuate(context.Background(), prm) - require.NotNil(t, res, "sync evacuation must return not nil") - require.NoError(t, err, "evacuation failed") + require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed") + st = testWaitForEvacuationCompleted(t, e) - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get evacuation state failed") - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) - - require.NoError(t, err, "get final state failed") require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count") require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees 
evacuated count") require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count") @@ -554,7 +648,7 @@ func TestEvacuateTreesRemote(t *testing.T) { require.Equal(t, "", st.ErrorMessage(), "invalid final error message") expectedTreeOps := make(map[string][]*pilorama.Move) - for i := 0; i < len(e.shards); i++ { + for i := range len(e.shards) { sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()]) require.NoError(t, err, "list source trees failed") require.Len(t, sourceTrees, 3) @@ -577,3 +671,157 @@ func TestEvacuateTreesRemote(t *testing.T) { require.Equal(t, expectedTreeOps, evacuatedTreeOps) } + +func TestEvacuateShardObjectsRepOneOnly(t *testing.T) { + e, ids, _ := newEngineEvacuate(t, 2, 0) + defer func() { + require.NoError(t, e.Close(context.Background())) + }() + + // Create container with policy REP 2 + cnr1 := container.Container{} + p1 := netmap.PlacementPolicy{} + p1.SetContainerBackupFactor(1) + x1 := netmap.ReplicaDescriptor{} + x1.SetNumberOfObjects(2) + p1.AddReplicas(x1) + x1 = netmap.ReplicaDescriptor{} + x1.SetNumberOfObjects(1) + p1.AddReplicas(x1) + cnr1.SetPlacementPolicy(p1) + cnr1.SetAttribute("cnr", "cnr1") + + var idCnr1 cid.ID + container.CalculateID(&idCnr1, cnr1) + + cnrmap := make(map[cid.ID]*container.Container) + var cids []cid.ID + cnrmap[idCnr1] = &cnr1 + cids = append(cids, idCnr1) + + // Create container with policy REP 1 + cnr2 := container.Container{} + p2 := netmap.PlacementPolicy{} + p2.SetContainerBackupFactor(1) + x2 := netmap.ReplicaDescriptor{} + x2.SetNumberOfObjects(1) + p2.AddReplicas(x2) + x2 = netmap.ReplicaDescriptor{} + x2.SetNumberOfObjects(1) + p2.AddReplicas(x2) + cnr2.SetPlacementPolicy(p2) + cnr2.SetAttribute("cnr", "cnr2") + + var idCnr2 cid.ID + container.CalculateID(&idCnr2, cnr2) + cnrmap[idCnr2] = &cnr2 + cids = append(cids, idCnr2) + + // Create container for simulate removing + cnr3 := container.Container{} + p3 := netmap.PlacementPolicy{} + p3.SetContainerBackupFactor(1) + x3 := netmap.ReplicaDescriptor{} + x3.SetNumberOfObjects(1) + p3.AddReplicas(x3) + cnr3.SetPlacementPolicy(p3) + cnr3.SetAttribute("cnr", "cnr3") + + var idCnr3 cid.ID + container.CalculateID(&idCnr3, cnr3) + cids = append(cids, idCnr3) + + e.SetContainerSource(&containerStorage{cntmap: cnrmap}) + + for _, sh := range ids { + for j := range 3 { + for range 4 { + obj := testutil.GenerateObjectWithCID(cids[j]) + var putPrm shard.PutPrm + putPrm.SetObject(obj) + _, err := e.shards[sh.String()].Put(context.Background(), putPrm) + require.NoError(t, err) + } + } + } + + var prm EvacuateShardPrm + prm.ShardID = ids[0:1] + prm.Scope = EvacuateScopeObjects + prm.RepOneOnly = true + + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, "", st.ErrorMessage()) + require.Equal(t, uint64(4), st.ObjectsEvacuated()) + require.Equal(t, uint64(8), st.ObjectsSkipped()) + require.Equal(t, uint64(0), st.ObjectsFailed()) +} + +func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { + t.Skip() + e, ids, _ := newEngineEvacuate(t, 2, 0) + defer func() { + require.NoError(t, e.Close(context.Background())) + }() + + cnrmap := make(map[cid.ID]*container.Container) + var cids []cid.ID + // Create containers with policy REP 1 + for i := range 10_000 { + cnr1 := container.Container{} + p1 := netmap.PlacementPolicy{} + p1.SetContainerBackupFactor(1) + x1 := 
netmap.ReplicaDescriptor{} + x1.SetNumberOfObjects(2) + p1.AddReplicas(x1) + cnr1.SetPlacementPolicy(p1) + cnr1.SetAttribute("i", strconv.Itoa(i)) + + var idCnr1 cid.ID + container.CalculateID(&idCnr1, cnr1) + + cnrmap[idCnr1] = &cnr1 + cids = append(cids, idCnr1) + } + + e.SetContainerSource(&containerStorage{ + cntmap: cnrmap, + latency: time.Millisecond * 100, + }) + + for _, cnt := range cids { + for range 1 { + obj := testutil.GenerateObjectWithCID(cnt) + var putPrm shard.PutPrm + putPrm.SetObject(obj) + _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm) + require.NoError(t, err) + } + } + + var prm EvacuateShardPrm + prm.ShardID = ids[0:1] + prm.Scope = EvacuateScopeObjects + prm.RepOneOnly = true + prm.ContainerWorkerCount = 10 + + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + + start := time.Now() + err := e.Evacuate(context.Background(), prm) + testWaitForEvacuationCompleted(t, e) + t.Logf("evacuate took %v\n", time.Since(start)) + require.NoError(t, err) +} + +func (l *evacuationLimiter) waitForCompleted() { + l.guard.Lock() + defer l.guard.Unlock() + + for l.state.processState != EvacuateProcessStateCompleted { + l.statusCond.Wait() + } +} diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index c57f79691..7dac9eb97 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "go.uber.org/zap" ) // exists return in the first value true if object exists. @@ -17,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool exists := false locked := false - e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Exists(ctx, shPrm) if err != nil { if client.IsErrObjectAlreadyRemoved(err) { @@ -36,7 +37,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } if !client.IsErrObjectNotFound(err) { - e.reportShardError(sh, "could not check existence of object in shard", err) + e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address)) } return false } @@ -49,7 +50,9 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } return false - }) + }); err != nil { + return false, false, err + } if alreadyRemoved { return false, false, new(apistatus.ObjectAlreadyRemoved) diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go new file mode 100644 index 000000000..9b3c0833f --- /dev/null +++ b/pkg/local_object_storage/engine/exists_test.go @@ -0,0 +1,51 @@ +package engine + +import ( + "context" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + "github.com/stretchr/testify/require" +) + +func BenchmarkExists(b *testing.B) { + b.Run("2 shards", func(b 
*testing.B) { + benchmarkExists(b, 2) + }) + b.Run("4 shards", func(b *testing.B) { + benchmarkExists(b, 4) + }) + b.Run("8 shards", func(b *testing.B) { + benchmarkExists(b, 8) + }) +} + +func benchmarkExists(b *testing.B, shardNum int) { + e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine + defer func() { require.NoError(b, e.Close(context.Background())) }() + + addr := oidtest.Address() + for range 100 { + obj := testutil.GenerateObjectWithCID(cidtest.ID()) + err := Put(context.Background(), e, obj, false) + if err != nil { + b.Fatal(err) + } + } + + b.ReportAllocs() + b.ResetTimer() + for range b.N { + var shPrm shard.ExistsPrm + shPrm.Address = addr + shPrm.ECParentAddress = oid.Address{} + ok, _, err := e.exists(context.Background(), shPrm) + if err != nil || ok { + b.Fatalf("%t %v", ok, err) + } + } +} diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 991af3d1a..0694c53f3 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -56,6 +55,7 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er attribute.String("address", prm.addr.EncodeToString()), )) defer span.End() + defer elapsed("Get", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { res, err = e.get(ctx, prm) @@ -66,10 +66,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er } func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { - if e.metrics != nil { - defer elapsed("Get", e.metrics.AddMethodDuration)() - } - errNotFound := new(apistatus.ObjectNotFound) var shPrm shard.GetPrm @@ -82,7 +78,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { Engine: e, } - it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return GetRes{}, err + } if it.SplitInfo != nil { return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -101,17 +99,18 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, it.OutError } - it.tryGetFromBlobstore(ctx) + if err := it.tryGetFromBlobstore(ctx); err != nil { + return GetRes{}, err + } if it.Object == nil { return GetRes{}, it.OutError } if it.ShardWithMeta.Shard != nil && it.MetaError != nil { - e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound, + e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.String("error", it.MetaError.Error()), - zap.Stringer("address", prm.addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(it.MetaError), + zap.Stringer("address", prm.addr)) } } @@ -138,8 +137,8 @@ type getShardIterator struct { ecInfoErr *objectSDK.ECInfoError } -func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { + 
return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.ShardPrm.SetIgnoreMeta(noMeta) @@ -186,19 +185,19 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { i.ObjectExpired = true return true default: - i.Engine.reportShardError(sh, "could not get object from shard", err) + i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) return false } }) } -func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) { +func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already visited. return false diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index 92d1b20fc..d436dd411 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -12,6 +12,7 @@ import ( apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.uber.org/zap" ) // HeadPrm groups the parameters of Head operation. @@ -67,9 +68,7 @@ func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head") defer span.End() - if e.metrics != nil { - defer elapsed("Head", e.metrics.AddMethodDuration)() - } + defer elapsed("Head", e.metrics.AddMethodDuration)() var ( head *objectSDK.Object @@ -83,7 +82,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) shPrm.SetAddress(prm.addr) shPrm.SetRaw(prm.raw) - e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold res, err := sh.Head(ctx, shPrm) if err != nil { @@ -118,25 +117,26 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) outError = new(apistatus.ObjectNotFound) return true default: - e.reportShardError(sh, "could not head object from shard", err) + e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr)) return false } } head = res.Object() return true - }) - - if outSI != nil { - return HeadRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI)) - } else if outEI != nil { - return HeadRes{}, logicerr.Wrap(objectSDK.NewECInfoError(outEI)) - } else if head == nil { - return HeadRes{}, outError + }); err != nil { + return HeadRes{}, err } - return HeadRes{ - head: head, - }, nil + if head != nil { + return HeadRes{head: head}, nil + } + if outSI != nil { + return HeadRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI)) + } + if outEI != nil { + return HeadRes{}, logicerr.Wrap(objectSDK.NewECInfoError(outEI)) + } + return HeadRes{}, outError } // Head reads object header from local 
storage by provided address. diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go index 5afc50f07..f9db81f16 100644 --- a/pkg/local_object_storage/engine/head_test.go +++ b/pkg/local_object_storage/engine/head_test.go @@ -39,11 +39,11 @@ func TestHeadRaw(t *testing.T) { link.SetSplitID(splitID) t.Run("virtual object split in different shards", func(t *testing.T) { - s1 := testNewShard(t) - s2 := testNewShard(t) + te := testNewEngine(t).setShardsNum(t, 2).prepare(t) + e := te.engine + defer func() { require.NoError(t, e.Close(context.Background())) }() - e := testNewEngine(t).setInitializedShards(t, s1, s2).engine - defer e.Close(context.Background()) + s1, s2 := te.shards[0], te.shards[1] var putPrmLeft shard.PutPrm putPrmLeft.SetObject(child) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 991305af0..e5f7072e2 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -27,9 +26,6 @@ type InhumePrm struct { forceRemoval bool } -// InhumeRes encapsulates results of inhume operation. -type InhumeRes struct{} - // WithTarget sets a list of objects that should be inhumed and tombstone address // as the reason for inhume operation. // @@ -67,21 +63,20 @@ var errInhumeFailure = errors.New("inhume operation failed") // with that object) if WithForceRemoval option has been provided. // // Returns an error if executions are blocked (see BlockExecution). 
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) { +func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume") defer span.End() + defer elapsed("Inhume", e.metrics.AddMethodDuration)() - err = e.execIfNotBlocked(func() error { - res, err = e.inhume(ctx, prm) - return err + return e.execIfNotBlocked(func() error { + return e.inhume(ctx, prm) }) - - return } -func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { - if e.metrics != nil { - defer elapsed("Inhume", e.metrics.AddMethodDuration)() +func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { + addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) + if err != nil { + return err } var shPrm shard.InhumePrm @@ -89,105 +84,205 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e shPrm.ForceRemoval() } - for i := range prm.addrs { - if !prm.forceRemoval { - locked, err := e.IsLocked(ctx, prm.addrs[i]) - if err != nil { - e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck, - zap.Error(err), - zap.Stringer("addr", prm.addrs[i]), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - } else if locked { - return InhumeRes{}, new(apistatus.ObjectLocked) - } - } - + for shardID, addrs := range addrsPerShard { if prm.tombstone != nil { - shPrm.SetTarget(*prm.tombstone, prm.addrs[i]) + shPrm.SetTarget(*prm.tombstone, addrs...) } else { - shPrm.MarkAsGarbage(prm.addrs[i]) + shPrm.MarkAsGarbage(addrs...) } - ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true) - if err != nil { - return InhumeRes{}, err + sh, exists := e.shards[shardID] + if !exists { + e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard, + zap.Error(errors.New("this shard was expected to exist")), + zap.String("shard_id", shardID), + ) + return errInhumeFailure } - if !ok { - ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false) - if err != nil { - return InhumeRes{}, err - } else if !ok { - return InhumeRes{}, errInhumeFailure - } + + if _, err := sh.Inhume(ctx, shPrm); err != nil { + e.reportInhumeError(ctx, err, sh) + return err } } - return InhumeRes{}, nil + return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm) } -// Returns ok if object was inhumed during this invocation or before. 
-func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
-	root := false
-	var existPrm shard.ExistsPrm
-	var retErr error
-	var ok bool
+func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) {
+	if err == nil {
+		return
+	}
 
-	e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
-		defer func() {
-			// if object is root we continue since information about it
-			// can be presented in other shards
-			if checkExists && root {
-				stop = false
-			}
-		}()
+	var errLocked *apistatus.ObjectLocked
+	switch {
+	case errors.As(err, &errLocked):
+	case errors.Is(err, shard.ErrLockObjectRemoval):
+	case errors.Is(err, shard.ErrReadOnlyMode):
+	case errors.Is(err, shard.ErrDegradedMode):
+	default:
+		e.reportShardError(ctx, hs, "couldn't inhume object in shard", err)
+	}
+}
 
-		if checkExists {
-			existPrm.Address = addr
-			exRes, err := sh.Exists(ctx, existPrm)
-			if err != nil {
-				if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) {
-					// inhumed once - no need to be inhumed again
-					ok = true
-					return true
-				}
+// inhumeNotFoundObjects removes objects that are not found on any shard.
+//
+// Even if an object is missing from every shard, it must still be inhumed to
+// populate the metabase indexes, because those indexes are responsible for
+// the correct object status: without the indexes the status will be `object
+// not found`, while with the indexes it will be `object is already
+// removed`.
+//
+// It is suggested to evenly remove those objects on each shard with a batch
+// size equal to 1 + floor(number of objects / number of shards).
+func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error {
+	if len(addrs) == 0 {
+		return nil
+	}
 
-				var siErr *objectSDK.SplitInfoError
-				var ecErr *objectSDK.ECInfoError
-				if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) {
-					e.reportShardError(sh, "could not check for presents in shard", err)
-					return
-				}
+	var shPrm shard.InhumePrm
+	if prm.forceRemoval {
+		shPrm.ForceRemoval()
+	}
 
-				root = true
-			} else if !exRes.Exists() {
-				return
-			}
+	numObjectsPerShard := 1 + len(addrs)/len(e.shards)
+
+	var inhumeErr error
+	itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
+		numObjects := min(numObjectsPerShard, len(addrs))
+
+		if numObjects == 0 {
+			return true
 		}
 
-		_, err := sh.Inhume(ctx, prm)
+		if prm.tombstone != nil {
+			shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...)
+		} else {
+			shPrm.MarkAsGarbage(addrs[:numObjects]...)
+		}
+		addrs = addrs[numObjects:]
+
+		_, inhumeErr = hs.Inhume(ctx, shPrm)
+		e.reportInhumeError(ctx, inhumeErr, hs)
+		return inhumeErr != nil
+	})
+	if inhumeErr != nil {
+		return inhumeErr
+	}
+	return itErr
+}
+
+// groupObjectsByShard groups objects based on the shard(s) they are stored on.
+//
+// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
+// the objects are locked.
+//
+// Returns two sets of objects: found objects, grouped per shard, and not
+// found objects, i.e., objects that are missing from every shard. This can
+// happen if a node is a container node but doesn't participate in a replica
+// group of the object.
+func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) { + groups = make(map[string][]oid.Address) + + var ids []string + for _, addr := range addrs { + ids, err = e.findShards(ctx, addr, checkLocked) if err != nil { - var errLocked *apistatus.ObjectLocked - switch { - case errors.As(err, &errLocked): + return + } + + if len(ids) == 0 { + notFoundObjects = append(notFoundObjects, addr) + continue + } + + for _, id := range ids { + groups[id] = append(groups[id], addr) + } + } + + return +} + +// findShards determines the shard(s) where the object is stored. +// +// If the object is a root object, multiple shards will be returned. +// +// If checkLocked is set, [apistatus.ObjectLocked] will be returned if the +// object is locked. +func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) { + var ( + ids []string + retErr error + + prm shard.ExistsPrm + + siErr *objectSDK.SplitInfoError + ecErr *objectSDK.ECInfoError + + isRootObject bool + objectExists bool + ) + + if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + objectExists = false + + prm.Address = addr + switch res, err := sh.Exists(ctx, prm); { + case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err): + // NOTE(@a-savchuk): there were some considerations that we can stop + // immediately if the object is already removed or expired. However, + // the previous method behavior was: + // - keep iterating if it's a root object and already removed, + // - stop iterating if it's not a root object and removed. + // + // Since my task was only improving method speed, let's keep the + // previous method behavior. Continue if it's a root object. + return !isRootObject + case errors.As(err, &siErr) || errors.As(err, &ecErr): + isRootObject = true + objectExists = true + case err != nil: + e.reportShardError( + ctx, sh, "couldn't check for presence in shard", + err, zap.Stringer("address", addr), + ) + case res.Exists(): + objectExists = true + default: + } + + if checkLocked { + if isLocked, err := sh.IsLocked(ctx, addr); err != nil { + e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, + zap.Error(err), + zap.Stringer("address", addr), + ) + } else if isLocked { retErr = new(apistatus.ObjectLocked) return true - case errors.Is(err, shard.ErrLockObjectRemoval): - retErr = meta.ErrLockObjectRemoval - return true - case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode): - retErr = err - return true } - - e.reportShardError(sh, "could not inhume object in shard", err) - return false } - ok = true - return true - }) + // This exit point must come after checking if the object is locked, + // since the locked index may be populated even if the object doesn't + // exist. + if !objectExists { + return + } - return ok, retErr + ids = append(ids, sh.ID().String()) + + // Continue if it's a root object. + return !isRootObject + }); err != nil { + return nil, err + } + + if retErr != nil { + return nil, retErr + } + return ids, nil } // IsLocked checks whether an object is locked according to StorageEngine's state.
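The batch size in inhumeNotFoundObjects above guarantees that one pass over the shards assigns every address: with N objects and S shards, S batches of size 1 + floor(N/S) cover strictly more than N addresses. A self-contained sketch of the same slicing, with spread and process as hypothetical stand-ins for the per-shard loop and Inhume call (illustration only, not part of the patch):

	// spread hands out addrs in batches of 1 + len(addrs)/numShards,
	// e.g. 10 addresses over 4 shards go out as 3 + 3 + 3 + 1.
	func spread(addrs []oid.Address, numShards int, process func([]oid.Address)) {
		batch := 1 + len(addrs)/numShards
		for len(addrs) > 0 {
			n := min(batch, len(addrs)) // min is a Go 1.21+ builtin
			process(addrs[:n])
			addrs = addrs[n:]
		}
	}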
@@ -202,17 +297,18 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e var err error var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - locked, err = h.Shard.IsLocked(ctx, addr) + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { + locked, err = h.IsLocked(ctx, addr) if err != nil { - e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) outErr = err return false } return locked - }) + }); err != nil { + return false, err + } if locked { return locked, nil @@ -221,94 +317,99 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e return locked, outErr } -// GetLocked return lock id's if object is locked according to StorageEngine's state. -func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocked", +// GetLocks returns lock IDs if the object is locked according to StorageEngine's state. +func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks", trace.WithAttributes( attribute.String("address", addr.EncodeToString()), )) defer span.End() - var locked []oid.ID + var allLocks []oid.ID var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - ld, err := h.Shard.GetLocked(ctx, addr) + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { + locks, err := h.GetLocks(ctx, addr) + if err != nil { - e.reportShardError(h, logs.EngineInterruptGettingLockers, err, zap.Stringer("addr", addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) outErr = err } - locked = append(locked, ld...) + allLocks = append(allLocks, locks...)
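+ // Keep iterating: lock records for the same object may reside on + // several shards, so the results from every shard are merged.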
return false - }) - if len(locked) > 0 { - return locked, nil + }); err != nil { + return nil, err } - return locked, outErr + if len(allLocks) > 0 { + return allLocks, nil + } + return allLocks, outErr } func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredTombstones(ctx, addrs) select { case <-ctx.Done(): + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err())) return true default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err)) + } } func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredLocks(ctx, epoch, lockers) select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err())) return true default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err)) + } } func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - sh.HandleDeletedLocks(lockers) + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + sh.HandleDeletedLocks(ctx, lockers) select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err())) return true default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err)) + } } func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) { if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerSizePrm - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -317,9 +418,9 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid var drop []cid.ID for id := range idMap { prm.SetContainerID(id) - s, err := sh.ContainerSize(prm) + s, err := sh.ContainerSize(ctx, prm) if err != nil { - e.log.Warn(logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } @@ -332,16 +433,18 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := 
e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -349,19 +452,20 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid for id := range idMap { if err := sh.DeleteContainerSize(ctx, id); err != nil { - e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerSize(id.EncodeToString()) } @@ -371,22 +475,19 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerCountPrm - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -397,7 +498,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci prm.ContainerID = id s, err := sh.ContainerCount(ctx, prm) if err != nil { - e.log.Warn(logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } @@ -410,16 +511,18 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -427,19 +530,20 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci for id := range idMap { if err := sh.DeleteContainerCount(ctx, id); err != nil { - e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerCount(id.EncodeToString()) } @@ -452,7 +556,7 @@ func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID) for _, id := range ids { isAvailable, err := cs.IsContainerAvailable(ctx, id) if err != nil { 
- e.log.Warn(logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err)) return nil, err } if isAvailable { diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 9daa113f8..0e268cd23 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -2,14 +2,24 @@ package engine import ( "context" + "fmt" + "strconv" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" ) func TestStorageEngine_Inhume(t *testing.T) { @@ -37,30 +47,31 @@ func TestStorageEngine_Inhume(t *testing.T) { t.Run("delete small object", func(t *testing.T) { t.Parallel() - e := testNewEngine(t).setShardsNum(t, 1).engine - defer e.Close(context.Background()) + e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine + defer func() { require.NoError(t, e.Close(context.Background())) }() - err := Put(context.Background(), e, parent) + err := Put(context.Background(), e, parent, false) require.NoError(t, err) var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) - addrs, err := Select(context.Background(), e, cnr, fs) + addrs, err := Select(context.Background(), e, cnr, false, fs) require.NoError(t, err) require.Empty(t, addrs) }) t.Run("delete big object", func(t *testing.T) { t.Parallel() - s1 := testNewShard(t) - s2 := testNewShard(t) - e := testNewEngine(t).setInitializedShards(t, s1, s2).engine - defer e.Close(context.Background()) + te := testNewEngine(t).setShardsNum(t, 2).prepare(t) + e := te.engine + defer func() { require.NoError(t, e.Close(context.Background())) }() + + s1, s2 := te.shards[0], te.shards[1] var putChild shard.PutPrm putChild.SetObject(child) @@ -75,11 +86,257 @@ func TestStorageEngine_Inhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) - addrs, err := Select(context.Background(), e, cnr, fs) + addrs, err := Select(context.Background(), e, cnr, false, fs) require.NoError(t, err) require.Empty(t, addrs) }) } + +func TestStorageEngine_ECInhume(t *testing.T) { + parentObjectAddress := oidtest.Address() + containerID := parentObjectAddress.Container() + + chunkObject0 := testutil.GenerateObjectWithCID(containerID) + 
chunkObject0.SetECHeader(objectSDK.NewECHeader( + objectSDK.ECParentInfo{ + ID: parentObjectAddress.Object(), + }, 0, 4, []byte{}, 0)) + + chunkObject1 := testutil.GenerateObjectWithCID(containerID) + chunkObject1.SetECHeader(objectSDK.NewECHeader( + objectSDK.ECParentInfo{ + ID: parentObjectAddress.Object(), + }, 1, 4, []byte{}, 0)) + + tombstone := objectSDK.NewTombstone() + tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()}) + payload, err := tombstone.Marshal() + require.NoError(t, err) + tombstoneObject := testutil.GenerateObjectWithCID(containerID) + tombstoneObject.SetType(objectSDK.TypeTombstone) + tombstoneObject.SetPayload(payload) + tombstoneObjectAddress := object.AddressOf(tombstoneObject) + + e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine + defer func() { require.NoError(t, e.Close(context.Background())) }() + + require.NoError(t, Put(context.Background(), e, chunkObject0, false)) + + require.NoError(t, Put(context.Background(), e, tombstoneObject, false)) + + var inhumePrm InhumePrm + inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress) + err = e.Inhume(context.Background(), inhumePrm) + require.NoError(t, err) + + var alreadyRemoved *apistatus.ObjectAlreadyRemoved + + require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved) + + require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved) +} + +func TestInhumeExpiredRegularObject(t *testing.T) { + t.Parallel() + + const currEpoch = 42 + const objectExpiresAfter = currEpoch - 1 + + engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{ + shard.WithDisabledGC(), + shard.WithMetaBaseOptions(append( + testGetDefaultMetabaseOptions(t), + meta.WithEpochState(epochState{currEpoch}), + )...), + } + }).prepare(t).engine + + cnr := cidtest.ID() + + generateAndPutObject := func() *objectSDK.Object { + obj := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) + + var putPrm PutPrm + putPrm.Object = obj + require.NoError(t, engine.Put(context.Background(), putPrm)) + return obj + } + + t.Run("inhume with tombstone", func(t *testing.T) { + obj := generateAndPutObject() + ts := oidtest.Address() + ts.SetContainer(cnr) + + var prm InhumePrm + prm.WithTarget(ts, object.AddressOf(obj)) + err := engine.Inhume(context.Background(), prm) + require.NoError(t, err) + }) + + t.Run("inhume without tombstone", func(t *testing.T) { + obj := generateAndPutObject() + + var prm InhumePrm + prm.MarkAsGarbage(object.AddressOf(obj)) + err := engine.Inhume(context.Background(), prm) + require.NoError(t, err) + }) +} + +func BenchmarkInhumeMultipart(b *testing.B) { + // The benchmark result depends on the number of shards only insignificantly, + // so it is not a benchmark parameter; just set it big enough. + numShards := 100 + + for numObjects := 1; numObjects <= 10000; numObjects *= 10 { + b.Run( + fmt.Sprintf("objects=%d", numObjects), + func(b *testing.B) { + benchmarkInhumeMultipart(b, numShards, numObjects) + }, + ) + } +} + +func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { + b.StopTimer() + + engine := testNewEngine(b).
+ setShardsNum(b, numShards).prepare(b).engine + defer func() { require.NoError(b, engine.Close(context.Background())) }() + + cnt := cidtest.ID() + eg := errgroup.Group{} + + for range b.N { + addrs := make([]oid.Address, numObjects) + + for i := range numObjects { + prm := PutPrm{} + + prm.Object = objecttest.Object().Parent() + prm.Object.SetContainerID(cnt) + prm.Object.SetType(objectSDK.TypeRegular) + + addrs[i] = object.AddressOf(prm.Object) + + eg.Go(func() error { + return engine.Put(context.Background(), prm) + }) + } + require.NoError(b, eg.Wait()) + + ts := oidtest.Address() + ts.SetContainer(cnt) + + prm := InhumePrm{} + prm.WithTarget(ts, addrs...) + + b.StartTimer() + err := engine.Inhume(context.Background(), prm) + require.NoError(b, err) + b.StopTimer() + } +} + +func TestInhumeIfObjectDoesntExist(t *testing.T) { + const numShards = 4 + + engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine + t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) }) + + t.Run("inhume without tombstone", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, false, false) + }) + t.Run("inhume with tombstone", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, true, false) + }) + t.Run("force inhume", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, false, true) + }) + + t.Run("object is locked", func(t *testing.T) { + t.Run("inhume without tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, engine, false, false) + }) + t.Run("inhume with tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, engine, true, false) + }) + t.Run("force inhume", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, engine, false, true) + }) + }) +} + +func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { + t.Parallel() + + object := oidtest.Address() + require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce)) + + err := testHeadObject(e, object) + if withTombstone { + require.True(t, client.IsErrObjectAlreadyRemoved(err)) + } else { + require.True(t, client.IsErrObjectNotFound(err)) + } +} + +func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { + t.Parallel() + + object := oidtest.Address() + require.NoError(t, testLockObject(e, object)) + + err := testInhumeObject(t, e, object, withTombstone, withForce) + if !withForce { + var errLocked *apistatus.ObjectLocked + require.ErrorAs(t, err, &errLocked) + return + } + require.NoError(t, err) + + err = testHeadObject(e, object) + if withTombstone { + require.True(t, client.IsErrObjectAlreadyRemoved(err)) + } else { + require.True(t, client.IsErrObjectNotFound(err)) + } +} + +func testLockObject(e *StorageEngine, obj oid.Address) error { + return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()}) +} + +func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error { + tombstone := oidtest.Address() + tombstone.SetContainer(obj.Container()) + + // Due to the test design it is possible to set both options; however, + // removal with a tombstone and forced removal are mutually exclusive.
+ require.False(t, withTombstone && withForce) + + var inhumePrm InhumePrm + if withTombstone { + inhumePrm.WithTarget(tombstone, obj) + } else { + inhumePrm.MarkAsGarbage(obj) + } + if withForce { + inhumePrm.WithForceRemoval() + } + return e.Inhume(context.Background(), inhumePrm) +} + +func testHeadObject(e *StorageEngine, obj oid.Address) error { + var headPrm HeadPrm + headPrm.WithAddress(obj) + + _, err := e.Head(context.Background(), headPrm) + return err +} diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go index cb3830b7c..073248862 100644 --- a/pkg/local_object_storage/engine/list.go +++ b/pkg/local_object_storage/engine/list.go @@ -7,6 +7,7 @@ import ( objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" ) // ErrEndOfListing is returned from an object listing with cursor @@ -98,6 +99,10 @@ func (l ListWithCursorRes) Cursor() *Cursor { // Returns ErrEndOfListing if there are no more objects to return or count // parameter set to zero. func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.ListWithCursor") + defer span.End() + defer elapsed("ListWithCursor", e.metrics.AddMethodDuration)() + result := make([]objectcore.Info, 0, prm.count) // Set initial cursors diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go index dd8a2e8a0..6cfa546f8 100644 --- a/pkg/local_object_storage/engine/list_test.go +++ b/pkg/local_object_storage/engine/list_test.go @@ -68,10 +68,7 @@ func TestListWithCursor(t *testing.T) { meta.WithEpochState(epochState{}), ), } - }).engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) - + }).prepare(t).engine defer func() { require.NoError(t, e.Close(context.Background())) }() @@ -79,14 +76,10 @@ func TestListWithCursor(t *testing.T) { expected := make([]object.Info, 0, tt.objectNum) got := make([]object.Info, 0, tt.objectNum) - for i := 0; i < tt.objectNum; i++ { + for range tt.objectNum { containerID := cidtest.ID() obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'}) - - var prm PutPrm - prm.WithObject(obj) - - err := e.Put(context.Background(), prm) + err := e.Put(context.Background(), PutPrm{Object: obj}) require.NoError(t, err) expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)}) } diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 5354c205f..3b0cf74f9 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -13,6 +13,7 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" ) var errLockFailed = errors.New("lock operation failed") @@ -31,6 +32,7 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l attribute.Int("locked_count", len(locked)), )) defer span.End() + defer elapsed("Lock", e.metrics.AddMethodDuration)() return e.execIfNotBlocked(func() error { return e.lock(ctx, idCnr, locker, locked) @@ -39,11 +41,19 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l func (e *StorageEngine) lock(ctx 
context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { for i := range locked { - switch e.lockSingle(ctx, idCnr, locker, locked[i], true) { + st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: - switch e.lockSingle(ctx, idCnr, locker, locked[i], false) { + st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: @@ -59,15 +69,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l // - 0: fail // - 1: locking irregular object // - 2: ok -func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { +func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) { // code is pretty similar to inhumeAddr, maybe unify? root := false - var addrLocked oid.Address addrLocked.SetContainer(idCnr) addrLocked.SetObject(locked) - - e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) { + retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) { defer func() { // if object is root we continue since information about it // can be presented in other shards @@ -79,25 +87,20 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo if checkExists { var existsPrm shard.ExistsPrm existsPrm.Address = addrLocked - exRes, err := sh.Exists(ctx, existsPrm) if err != nil { var siErr *objectSDK.SplitInfoError var eiErr *objectSDK.ECInfoError if errors.As(err, &eiErr) { - eclocked := []oid.ID{locked} - for _, chunk := range eiErr.ECInfo().Chunks { - var objID oid.ID - err = objID.ReadFromV2(chunk.ID) - if err != nil { - e.reportShardError(sh, "could not lock object in shard", err) - return false - } - eclocked = append(eclocked, objID) + eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr) + if !ok { + return false } + err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err) + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return false } root = true @@ -108,8 +111,8 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo // do not lock it return true } - - e.reportShardError(sh, "could not check locked object for presence in shard", err) + e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return } @@ -121,21 +124,33 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked}) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err) + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) var errIrregular *apistatus.LockNonRegularObject if errors.As(err, &errIrregular) { status = 1 return true } - return false } - status = 2 - return true }) - return } + +func (e *StorageEngine) 
checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) { + eclocked := []oid.ID{locked} + for _, chunk := range eiErr.ECInfo().Chunks { + var objID oid.ID + err := objID.ReadFromV2(chunk.ID) + if err != nil { + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) + return nil, false + } + eclocked = append(eclocked, objID) + } + return eclocked, true +} diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index 7fa7c27ef..b8c9d6b1d 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -57,11 +57,9 @@ func TestLockUserScenario(t *testing.T) { }), shard.WithTombstoneSource(tss{lockerExpiresAfter}), } - }) + }). + prepare(t) e := testEngine.engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) - defer func() { require.NoError(t, e.Close(context.Background())) }() lockerID := oidtest.ID() @@ -97,7 +95,7 @@ func TestLockUserScenario(t *testing.T) { id, _ := obj.ID() objAddr.SetObject(id) - err = Put(context.Background(), e, obj) + err = Put(context.Background(), e, obj, false) require.NoError(t, err) // 2. @@ -105,7 +103,7 @@ func TestLockUserScenario(t *testing.T) { locker.WriteMembers([]oid.ID{id}) objectSDK.WriteLock(lockerObj, locker) - err = Put(context.Background(), e, lockerObj) + err = Put(context.Background(), e, lockerObj, false) require.NoError(t, err) err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id}) @@ -116,7 +114,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. @@ -124,12 +122,12 @@ func TestLockUserScenario(t *testing.T) { tombObj.SetID(tombForLockID) tombObj.SetAttributes(a) - err = Put(context.Background(), e, tombObj) + err = Put(context.Background(), e, tombObj, false) require.NoError(t, err) inhumePrm.WithTarget(tombForLockAddr, lockerAddr) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorIs(t, err, meta.ErrLockObjectRemoval) // 5. 
@@ -138,7 +136,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) require.Eventually(t, func() bool { - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -162,11 +160,9 @@ func TestLockExpiration(t *testing.T) { return pool }), } - }) + }). + prepare(t) e := testEngine.engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) - defer func() { require.NoError(t, e.Close(context.Background())) }() const lockerExpiresAfter = 13 @@ -177,7 +173,7 @@ func TestLockExpiration(t *testing.T) { // 1. obj := testutil.GenerateObjectWithCID(cnr) - err = Put(context.Background(), e, obj) + err = Put(context.Background(), e, obj, false) require.NoError(t, err) // 2. @@ -189,7 +185,7 @@ func TestLockExpiration(t *testing.T) { lock.SetType(objectSDK.TypeLock) lock.SetAttributes(a) - err = Put(context.Background(), e, lock) + err = Put(context.Background(), e, lock, false) require.NoError(t, err) id, _ := obj.ID() @@ -199,20 +195,24 @@ func TestLockExpiration(t *testing.T) { require.NoError(t, err) var inhumePrm InhumePrm - inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) + tombAddr := oidtest.Address() + tombAddr.SetContainer(cnr) + inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 3. e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1) // 4. - inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) + tombAddr = oidtest.Address() + tombAddr.SetContainer(cnr) + inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) require.Eventually(t, func() bool { - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -239,9 +239,8 @@ func TestLockForceRemoval(t *testing.T) { }), shard.WithDeletedLockCallback(e.processDeletedLocks), } - }).engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) + }). + prepare(t).engine defer func() { require.NoError(t, e.Close(context.Background())) }() cnr := cidtest.ID() @@ -250,14 +249,14 @@ func TestLockForceRemoval(t *testing.T) { // 1. obj := testutil.GenerateObjectWithCID(cnr) - err = Put(context.Background(), e, obj) + err = Put(context.Background(), e, obj, false) require.NoError(t, err) // 2. lock := testutil.GenerateObjectWithCID(cnr) lock.SetType(objectSDK.TypeLock) - err = Put(context.Background(), e, lock) + err = Put(context.Background(), e, lock, false) require.NoError(t, err) id, _ := obj.ID() @@ -271,12 +270,12 @@ func TestLockForceRemoval(t *testing.T) { inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. 
@@ -284,12 +283,64 @@ func TestLockForceRemoval(t *testing.T) { deletePrm.WithAddress(objectcore.AddressOf(lock)) deletePrm.WithForceRemoval() - _, err = e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) // 5. inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) } + +func TestLockExpiredRegularObject(t *testing.T) { + const currEpoch = 42 + const objectExpiresAfter = currEpoch - 1 + + engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{ + shard.WithDisabledGC(), + shard.WithMetaBaseOptions(append( + testGetDefaultMetabaseOptions(t), + meta.WithEpochState(epochState{currEpoch}), + )...), + } + }).prepare(t).engine + + cnr := cidtest.ID() + + object := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) + + address := objectcore.AddressOf(object) + + var putPrm PutPrm + putPrm.Object = object + require.NoError(t, engine.Put(context.Background(), putPrm)) + + var getPrm GetPrm + var errNotFound *apistatus.ObjectNotFound + + getPrm.WithAddress(address) + _, err := engine.Get(context.Background(), getPrm) + require.ErrorAs(t, err, &errNotFound) + + t.Run("lock expired regular object", func(t *testing.T) { + require.NoError(t, engine.Lock(context.Background(), + address.Container(), + oidtest.ID(), + []oid.ID{address.Object()}, + )) + + res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object)) + require.NoError(t, err) + require.True(t, res) + }) + + t.Run("get expired and locked regular object", func(t *testing.T) { + getPrm.WithAddress(objectcore.AddressOf(object)) + + res, err := engine.Get(context.Background(), getPrm) + require.NoError(t, err) + require.Equal(t, object, res.Object()) + }) +} diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go index 72b5ae252..963292d83 100644 --- a/pkg/local_object_storage/engine/metrics.go +++ b/pkg/local_object_storage/engine/metrics.go @@ -7,33 +7,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) -type MetricRegister interface { - AddMethodDuration(method string, d time.Duration) - - SetObjectCounter(shardID, objectType string, v uint64) - AddToObjectCounter(shardID, objectType string, delta int) - - SetMode(shardID string, mode mode.Mode) - - AddToContainerSize(cnrID string, size int64) - DeleteContainerSize(cnrID string) - DeleteContainerCount(cnrID string) - AddToPayloadCounter(shardID string, size int64) - IncErrorCounter(shardID string) - ClearErrorCounter(shardID string) - DeleteShardMetrics(shardID string) - - SetContainerObjectCounter(shardID, contID, objectType string, v uint64) - IncContainerObjectCounter(shardID, contID, objectType string) - SubContainerObjectCounter(shardID, contID, objectType string, v uint64) - - IncRefillObjectsCount(shardID, path string, size int, success bool) - SetRefillPercent(shardID, path string, percent uint32) - SetRefillStatus(shardID, path, status string) - - WriteCache() metrics.WriteCacheMetrics - GC() metrics.GCMetrics -} +type ( + MetricRegister = metrics.EngineMetrics + GCMetrics = metrics.GCMetrics + WriteCacheMetrics = metrics.WriteCacheMetrics + NullBool = metrics.NullBool +) func elapsed(method string, addFunc func(method string, d time.Duration)) func() { t :=
time.Now() @@ -67,3 +46,48 @@ func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) { m.storage.AddInhumedObjectCount(m.shardID, count, objectType) } + +type ( + noopMetrics struct{} + noopWriteCacheMetrics struct{} + noopGCMetrics struct{} +) + +var ( + _ MetricRegister = noopMetrics{} + _ WriteCacheMetrics = noopWriteCacheMetrics{} + _ GCMetrics = noopGCMetrics{} +) + +func (noopMetrics) AddMethodDuration(string, time.Duration) {} +func (noopMetrics) SetObjectCounter(string, string, uint64) {} +func (noopMetrics) AddToObjectCounter(string, string, int) {} +func (noopMetrics) SetMode(string, mode.Mode) {} +func (noopMetrics) AddToContainerSize(string, int64) {} +func (noopMetrics) DeleteContainerSize(string) {} +func (noopMetrics) DeleteContainerCount(string) {} +func (noopMetrics) AddToPayloadCounter(string, int64) {} +func (noopMetrics) IncErrorCounter(string) {} +func (noopMetrics) ClearErrorCounter(string) {} +func (noopMetrics) DeleteShardMetrics(string) {} +func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {} +func (noopMetrics) IncContainerObjectCounter(string, string, string) {} +func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {} +func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {} +func (noopMetrics) SetRefillPercent(string, string, uint32) {} +func (noopMetrics) SetRefillStatus(string, string, string) {} +func (noopMetrics) SetEvacuationInProgress(string, bool) {} +func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} } +func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} } + +func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {} +func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {} +func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {} +func (noopWriteCacheMetrics) SetMode(string, string) {} +func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {} +func (noopWriteCacheMetrics) Close(string, string) {} + +func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {} +func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {} +func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {} +func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {} diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 54385910b..10cf5ffd5 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -9,8 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -22,7 +20,8 @@ import ( // PutPrm groups the parameters of Put operation. 
type PutPrm struct { - obj *objectSDK.Object + Object *objectSDK.Object + IsIndexedContainer bool } var errPutShard = errors.New("could not put object to any shard") @@ -41,13 +40,6 @@ type putToShardRes struct { err error } -// WithObject is a Put option to set object to save. -// -// Option is required. -func (p *PutPrm) WithObject(obj *objectSDK.Object) { - p.obj = obj -} - // Put saves the object to local storage. // // Returns any error encountered that @@ -59,9 +51,10 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) { func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put", trace.WithAttributes( - attribute.String("address", object.AddressOf(prm.obj).EncodeToString()), + attribute.String("address", object.AddressOf(prm.Object).EncodeToString()), )) defer span.End() + defer elapsed("Put", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { err = e.put(ctx, prm) @@ -72,29 +65,25 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { } func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { - if e.metrics != nil { - defer elapsed("Put", e.metrics.AddMethodDuration)() - } - - addr := object.AddressOf(prm.obj) + addr := object.AddressOf(prm.Object) // In #1146 this check was parallelized, however, it became // much slower on fast machines for 4 shards. - var parent oid.Address - if prm.obj.ECHeader() != nil { - parent.SetObject(prm.obj.ECHeader().Parent()) - parent.SetContainer(addr.Container()) + var ecParent oid.Address + if prm.Object.ECHeader() != nil { + ecParent.SetObject(prm.Object.ECHeader().Parent()) + ecParent.SetContainer(addr.Container()) } var shPrm shard.ExistsPrm shPrm.Address = addr - shPrm.ParentAddress = parent + shPrm.ECParentAddress = ecParent existed, locked, err := e.exists(ctx, shPrm) if err != nil { return err } if !existed && locked { - lockers, err := e.GetLocked(ctx, parent) + lockers, err := e.GetLocks(ctx, ecParent) if err != nil { return err } @@ -107,17 +96,19 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } var shRes putToShardRes - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { e.mtx.RLock() - pool, ok := e.shardPools[sh.ID().String()] + _, ok := e.shards[sh.ID().String()] e.mtx.RUnlock() if !ok { // Shard was concurrently removed, skip. return false } - shRes = e.putToShard(ctx, sh, pool, addr, prm.obj) + shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown - }) + }); err != nil { + return err + } switch shRes.status { case putToShardUnknown: return errPutShard @@ -132,80 +123,64 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // putToShard puts object to sh. // Return putToShardStatus and error if it is necessary to propagate an error upper. 
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool, - addr oid.Address, obj *objectSDK.Object, +func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, + addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool, ) (res putToShardRes) { - exitCh := make(chan struct{}) + var existPrm shard.ExistsPrm + existPrm.Address = addr - if err := pool.Submit(func() { - defer close(exitCh) - - var existPrm shard.ExistsPrm - existPrm.Address = addr - - exists, err := sh.Exists(ctx, existPrm) - if err != nil { - if shard.IsErrObjectExpired(err) { - // object is already found but - // expired => do nothing with it - res.status = putToShardExists - } else { - e.log.Warn(logs.EngineCouldNotCheckObjectExistence, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - } - - return // this is not ErrAlreadyRemoved error so we can go to the next shard - } - - if exists.Exists() { + exists, err := sh.Exists(ctx, existPrm) + if err != nil { + if shard.IsErrObjectExpired(err) { + // object is already found but + // expired => do nothing with it res.status = putToShardExists - return + } else { + e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) } - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - _, err = sh.Put(ctx, putPrm) - if err != nil { - if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || - errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn(logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - return - } - if client.IsErrObjectAlreadyRemoved(err) { - e.log.Warn(logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - res.status = putToShardRemoved - res.err = err - return - } - - e.reportShardError(sh, "could not put object to shard", err) - return - } - - res.status = putToShardSuccess - }); err != nil { - e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err)) - close(exitCh) + return // this is not ErrAlreadyRemoved error so we can go to the next shard } - <-exitCh + if exists.Exists() { + res.status = putToShardExists + return + } + + var putPrm shard.PutPrm + putPrm.SetObject(obj) + putPrm.SetIndexAttributes(isIndexedContainer) + + _, err = sh.Put(ctx, putPrm) + if err != nil { + if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || + errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + return + } + if client.IsErrObjectAlreadyRemoved(err) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + res.status = putToShardRemoved + res.err = err + return + } + + e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) + return + } + + res.status = putToShardSuccess return } // Put writes provided object to local storage. 
-func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error { - var putPrm PutPrm - putPrm.WithObject(obj) - - return storage.Put(ctx, putPrm) +func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error { + return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer}) } diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index f5b33a251..7ec4742d8 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -65,6 +64,15 @@ func (r RngRes) Object() *objectSDK.Object { // // Returns an error if executions are blocked (see BlockExecution). func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + attribute.String("offset", strconv.FormatUint(prm.off, 10)), + attribute.String("length", strconv.FormatUint(prm.ln, 10)), + )) + defer span.End() + defer elapsed("GetRange", e.metrics.AddMethodDuration)() + err = e.execIfNotBlocked(func() error { res, err = e.getRange(ctx, prm) return err @@ -74,18 +82,6 @@ func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, e } func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - attribute.String("offset", strconv.FormatUint(prm.off, 10)), - attribute.String("length", strconv.FormatUint(prm.ln, 10)), - )) - defer span.End() - - if e.metrics != nil { - defer elapsed("GetRange", e.metrics.AddMethodDuration)() - } - var shPrm shard.RngPrm shPrm.SetAddress(prm.addr) shPrm.SetRange(prm.off, prm.ln) @@ -97,7 +93,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error Engine: e, } - it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return RngRes{}, err + } if it.SplitInfo != nil { return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -113,17 +111,18 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error return RngRes{}, it.OutError } - it.tryGetFromBlobstor(ctx) + if err := it.tryGetFromBlobstor(ctx); err != nil { + return RngRes{}, err + } if it.Object == nil { return RngRes{}, it.OutError } if it.ShardWithMeta.Shard != nil && it.MetaError != nil { - e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound, + e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.String("error", it.MetaError.Error()), - zap.Stringer("address", prm.addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(it.MetaError), + zap.Stringer("address", prm.addr)) } } @@ -162,8 +161,8 @@ type getRangeShardIterator struct { Engine 
*StorageEngine } -func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.HasDegraded = i.HasDegraded || noMeta i.ShardPrm.SetIgnoreMeta(noMeta) @@ -208,19 +207,19 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { return true // stop, return it back default: - i.Engine.reportShardError(sh, "could not get object from shard", err) + i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) return false } }) } -func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) { +func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already processed it without a metabase. return false diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go new file mode 100644 index 000000000..a29dd7ed9 --- /dev/null +++ b/pkg/local_object_storage/engine/rebuild.go @@ -0,0 +1,108 @@ +package engine + +import ( + "context" + "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" +) + +type RebuildPrm struct { + ShardIDs []*shard.ID + ConcurrencyLimit uint32 + TargetFillPercent uint32 +} + +type ShardRebuildResult struct { + ShardID *shard.ID + Success bool + ErrorMsg string +} + +type RebuildRes struct { + ShardResults []ShardRebuildResult +} + +func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Rebuild", + trace.WithAttributes( + attribute.Int("shard_id_count", len(prm.ShardIDs)), + attribute.Int64("target_fill_percent", int64(prm.TargetFillPercent)), + attribute.Int64("concurrency_limit", int64(prm.ConcurrencyLimit)), + )) + defer span.End() + + res := RebuildRes{ + ShardResults: make([]ShardRebuildResult, 0, len(prm.ShardIDs)), + } + resGuard := &sync.Mutex{} + + concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)} + + eg, egCtx := errgroup.WithContext(ctx) + for _, shardID := range prm.ShardIDs { + eg.Go(func() error { + e.mtx.RLock() + sh, ok := e.shards[shardID.String()] + e.mtx.RUnlock() + + if !ok { + resGuard.Lock() + defer resGuard.Unlock() + res.ShardResults = append(res.ShardResults, ShardRebuildResult{ + ShardID: shardID, + ErrorMsg: errShardNotFound.Error(), + }) + return nil + } + + err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{ + ConcurrencyLimiter: concLimiter, + TargetFillPercent: prm.TargetFillPercent, + }) + + resGuard.Lock() + defer resGuard.Unlock() + + if 
err != nil { + res.ShardResults = append(res.ShardResults, ShardRebuildResult{ + ShardID: shardID, + ErrorMsg: err.Error(), + }) + } else { + res.ShardResults = append(res.ShardResults, ShardRebuildResult{ + ShardID: shardID, + Success: true, + }) + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return RebuildRes{}, err + } + return res, nil +} + +type concurrencyLimiter struct { + semaphore chan struct{} +} + +func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + select { + case l.semaphore <- struct{}{}: + return l.releaseWorkSlot, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (l *concurrencyLimiter) releaseWorkSlot() { + <-l.semaphore +} diff --git a/pkg/local_object_storage/engine/rebuild_limiter.go b/pkg/local_object_storage/engine/rebuild_limiter.go deleted file mode 100644 index 28b02b0a3..000000000 --- a/pkg/local_object_storage/engine/rebuild_limiter.go +++ /dev/null @@ -1,26 +0,0 @@ -package engine - -import "context" - -type rebuildLimiter struct { - semaphore chan struct{} -} - -func newRebuildLimiter(workersCount uint32) *rebuildLimiter { - return &rebuildLimiter{ - semaphore: make(chan struct{}, workersCount), - } -} - -func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error { - select { - case l.semaphore <- struct{}{}: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (l *rebuildLimiter) ReleaseWorkSlot() { - <-l.semaphore -} diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go index b99cf4f44..8ab3c5217 100644 --- a/pkg/local_object_storage/engine/remove_copies.go +++ b/pkg/local_object_storage/engine/remove_copies.go @@ -43,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat prm.Concurrency = defaultRemoveDuplicatesConcurrency } - e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies, + e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies, zap.Int("concurrency", prm.Concurrency)) // The mutex must be taken for the whole duration to avoid target shard being removed // This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0. // However we could change weights in future and easily forget this function.
for _, sh := range e.shards { - e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID())) + e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID())) ch := make(chan oid.Address) errG, ctx := errgroup.WithContext(ctx) @@ -87,18 +87,18 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat } }) - for i := 0; i < prm.Concurrency; i++ { + for range prm.Concurrency { errG.Go(func() error { return e.removeObjects(ctx, ch) }) } if err := errG.Wait(); err != nil { - e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err)) + e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err)) return err } } - e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies) + e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies) return nil } diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go index 99963576c..6d2291c74 100644 --- a/pkg/local_object_storage/engine/remove_copies_test.go +++ b/pkg/local_object_storage/engine/remove_copies_test.go @@ -96,7 +96,7 @@ loop: require.FailNow(t, "unexpected object was removed", removed[i].addr) } - for i := 0; i < copyCount; i++ { + for i := range copyCount { if i%3 == 0 { require.True(t, removedMask[i], "object %d was expected to be removed", i) } else { @@ -207,7 +207,7 @@ func TestRebalanceExitByContext(t *testing.T) { }() const removeCount = 3 - for i := 0; i < removeCount-1; i++ { + for range removeCount - 1 { <-deleteCh signal <- struct{}{} } diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 6a8c9fab9..4243a5481 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -14,8 +14,9 @@ import ( // SelectPrm groups the parameters of Select operation. type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters + cnr cid.ID + filters objectSDK.SearchFilters + indexedContainer bool } // SelectRes groups the resulting values of Select operation. @@ -24,8 +25,9 @@ type SelectRes struct { } // WithContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) WithContainerID(cnr cid.ID) { +func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) { p.cnr = cnr + p.indexedContainer = indexedContainer } // WithFilters is a Select option to set the object filters. 
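For illustration, a caller-side sketch of the updated SelectPrm API from the hunk above (hedged: `storageEngine`, `cnr`, the filter key, and the surrounding function are hypothetical; only WithContainerID, WithFilters, and Select come from this diff):

```go
// Search a container, telling the engine whether the container keeps
// attribute indexes (the new second argument of WithContainerID).
var prm engine.SelectPrm
prm.WithContainerID(cnr, true /* indexedContainer */)

var fs objectSDK.SearchFilters
fs.AddFilter("FileName", "report.txt", objectSDK.MatchStringEqual)
prm.WithFilters(fs)

res, err := storageEngine.Select(ctx, prm)
if err != nil {
	return err
}
for _, addr := range res.AddressList() {
	_ = addr // each oid.Address that matched the filters
}
```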
@@ -49,33 +51,29 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe attribute.String("container_id", prm.cnr.EncodeToString()), )) defer span.End() + defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e._select(ctx, prm) - return err + var sErr error + res, sErr = e._select(ctx, prm) + return sErr }) return } func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { - if e.metrics != nil { - defer elapsed("Search", e.metrics.AddMethodDuration)() - } - addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) - var outError error - var shPrm shard.SelectPrm - shPrm.SetContainerID(prm.cnr) + shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.Select(ctx, shPrm) if err != nil { - e.reportShardError(sh, "could not select objects from shard", err) + e.reportShardError(ctx, sh, "could not select objects from shard", err) return false } @@ -87,11 +85,13 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, - }, outError + }, nil } // List returns `limit` available physically stored object addresses in engine. @@ -99,28 +99,26 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, // // Returns an error if executions are blocked (see BlockExecution). func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { + defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.list(ctx, limit) - return err + var lErr error + res, lErr = e.list(ctx, limit) + return lErr }) return } func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) { - if e.metrics != nil { - defer elapsed("ListObjects", e.metrics.AddMethodDuration)() - } - addrList := make([]oid.Address, 0, limit) uniqueMap := make(map[string]struct{}) ln := uint64(0) // consider iterating over shuffled shards - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limiting the result of the shard iterator if err != nil { - e.reportShardError(sh, "could not select objects from shard", err) + e.reportShardError(ctx, sh, "could not select objects from shard", err) } else { for _, addr := range res.AddressList() { // save only unique values if _, ok := uniqueMap[addr.EncodeToString()]; !ok { @@ -136,7 +134,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, @@ -144,9 +144,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro } // Select selects objects from local storage using provided filters.
-func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) { +func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) { var selectPrm SelectPrm - selectPrm.WithContainerID(cnr) + selectPrm.WithContainerID(cnr, isIndexedContainer) selectPrm.WithFilters(fs) res, err := storage.Select(ctx, selectPrm) diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 56d4fcd4a..69067c500 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -11,10 +11,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/hrw" "github.com/google/uuid" - "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" ) @@ -97,6 +99,10 @@ func (m *metricsWithID) SetRefillStatus(path string, status string) { m.mw.SetRefillStatus(m.id, path, status) } +func (m *metricsWithID) SetEvacuationInProgress(value bool) { + m.mw.SetEvacuationInProgress(m.id, value) +} + // AddShard adds a new shard to the storage engine. // // Returns any error encountered that did not allow adding a shard. @@ -104,25 +110,23 @@ func (m *metricsWithID) SetRefillStatus(path string, status string) { func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) { sh, err := e.createShard(ctx, opts) if err != nil { - return nil, fmt.Errorf("could not create a shard: %w", err) + return nil, fmt.Errorf("create a shard: %w", err) } err = e.addShard(sh) if err != nil { - return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err) + return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) } - if e.cfg.metrics != nil { - e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) - } + e.metrics.SetMode(sh.ID().String(), sh.GetMode()) return sh.ID(), nil } -func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*shard.Shard, error) { +func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) { id, err := generateShardID() if err != nil { - return nil, fmt.Errorf("could not generate shard ID: %w", err) + return nil, fmt.Errorf("generate shard ID: %w", err) } opts = e.appendMetrics(id, opts) @@ -132,14 +136,13 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh shard.WithExpiredTombstonesCallback(e.processExpiredTombstones), shard.WithExpiredLocksCallback(e.processExpiredLocks), shard.WithDeletedLockCallback(e.processDeletedLocks), - shard.WithReportErrorFunc(e.reportShardErrorBackground), - shard.WithRebuildWorkerLimiter(e.rebuildLimiter), + shard.WithReportErrorFunc(e.reportShardErrorByID), shard.WithZeroSizeCallback(e.processZeroSizeContainers), shard.WithZeroCountCallback(e.processZeroCountContainers), )...) 
- if err := sh.UpdateID(); err != nil { - e.log.Warn(logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err)) + if err := sh.UpdateID(ctx); err != nil { + e.log.Warn(ctx, logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err)) } return sh, nil @@ -149,28 +152,26 @@ func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard e.mtx.RLock() defer e.mtx.RUnlock() - if e.metrics != nil { - opts = append(opts, - shard.WithMetricsWriter( - &metricsWithID{ - id: id.String(), - mw: e.metrics, - }, - ), - shard.WithWriteCacheMetrics( - &writeCacheMetrics{ - shardID: id.String(), - metrics: e.metrics.WriteCache(), - }, - ), - shard.WithGCMetrics( - &gcMetrics{ - storage: e.metrics.GC(), - shardID: id.String(), - }, - ), - ) - } + opts = append(opts, + shard.WithMetricsWriter( + &metricsWithID{ + id: id.String(), + mw: e.metrics, + }, + ), + shard.WithWriteCacheMetrics( + &writeCacheMetrics{ + shardID: id.String(), + metrics: e.metrics.WriteCache(), + }, + ), + shard.WithGCMetrics( + &gcMetrics{ + storage: e.metrics.GC(), + shardID: id.String(), + }, + ), + ) return opts } @@ -179,11 +180,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { e.mtx.Lock() defer e.mtx.Unlock() - pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true)) - if err != nil { - return fmt.Errorf("could not create pool: %w", err) - } - strID := sh.ID().String() if _, ok := e.shards[strID]; ok { return fmt.Errorf("shard with id %s was already added", strID) @@ -197,14 +193,12 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { hash: hrw.StringHash(strID), } - e.shardPools[strID] = pool - return nil } // removeShards removes specified shards. Skips non-existent shards. // Logs errors about shards that it could not Close after the removal. 
-func (e *StorageEngine) removeShards(ids ...string) { +func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) { if len(ids) == 0 { return } @@ -218,33 +212,27 @@ func (e *StorageEngine) removeShards(ids ...string) { continue } - sh.DeleteShardMetrics() + e.metrics.DeleteShardMetrics(id) ss = append(ss, sh) delete(e.shards, id) - pool, ok := e.shardPools[id] - if ok { - pool.Release() - delete(e.shardPools, id) - } - - e.log.Info(logs.EngineShardHasBeenRemoved, + e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", id)) } e.mtx.Unlock() for _, sh := range ss { - err := sh.SetMode(mode.Disabled) + err := sh.SetMode(ctx, mode.Disabled) if err != nil { - e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled, + e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled, zap.Stringer("id", sh.ID()), zap.Error(err), ) } - err = sh.Close() + err = sh.Close(ctx) if err != nil { - e.log.Error(logs.EngineCouldNotCloseRemovedShard, + e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) @@ -273,7 +261,7 @@ func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string }) h := hrw.StringHash(objAddr.EncodeToString()) shards := make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, hashedShard(sh)) + shards = append(shards, sh) } hrw.SortHasherSliceByValue(shards, h) return shards @@ -286,32 +274,44 @@ func (e *StorageEngine) unsortedShards() []hashedShard { shards := make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, hashedShard(sh)) + shards = append(shards, sh) } return shards } -func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error { for i, sh := range e.sortShards(addr) { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(i, sh) { break } } + return nil } -func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error { for _, sh := range e.unsortedShards() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(sh) { break } } + return nil } // SetShardMode sets mode of the shard with provided identifier. // // Returns an error if shard mode was not set, or shard was not found in storage engine. -func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounter bool) error { +func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.Mode, resetErrorCounter bool) error { e.mtx.RLock() defer e.mtx.RUnlock() @@ -319,9 +319,9 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte if id.String() == shID { if resetErrorCounter { sh.errorCount.Store(0) - sh.Shard.ClearErrorCounter() + e.metrics.ClearErrorCounter(shID) } - return sh.SetMode(m) + return sh.SetMode(ctx, m) } } @@ -330,8 +330,6 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte // HandleNewEpoch notifies every shard about NewEpoch event. 
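The cancellation check threaded into both shard iterators above is the standard non-blocking select before each step. A condensed, self-contained sketch of the pattern (the generic helper name is ours, not the engine's):

```go
package engine // illustrative placement; the real iterators live on StorageEngine

import "context"

// iterate visits items in order, but aborts between steps as soon as
// the context is cancelled, returning ctx.Err() to the caller.
func iterate[T any](ctx context.Context, items []T, handler func(T) (stop bool)) error {
	for _, it := range items {
		select {
		case <-ctx.Done():
			return ctx.Err() // cancelled or deadline exceeded
		default:
		}
		if handler(it) {
			break // handler asked to stop early; not an error
		}
	}
	return nil
}
```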
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { - ev := shard.EventNewEpoch(epoch) - e.mtx.RLock() defer e.mtx.RUnlock() @@ -339,55 +337,54 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { select { case <-ctx.Done(): return - case sh.NotificationChannel() <- ev: + case sh.NotificationChannel() <- epoch: default: - e.log.Debug(logs.ShardEventProcessingInProgress, + e.log.Debug(ctx, logs.ShardEventProcessingInProgress, zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) } } } -func (e *StorageEngine) DetachShards(ids []*shard.ID) error { +func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error { if len(ids) == 0 { return logicerr.New("ids must be non-empty") } - deletedShards, err := e.deleteShards(ids) + deletedShards, err := e.deleteShards(ctx, ids) if err != nil { return err } - return e.closeShards(deletedShards) + return e.closeShards(ctx, deletedShards) } // closeShards closes deleted shards. Tries to close all shards. // Returns single error with joined shard errors. -func (e *StorageEngine) closeShards(deletedShards []hashedShard) error { +func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedShard) error { var multiErr error var multiErrGuard sync.Mutex var eg errgroup.Group for _, sh := range deletedShards { - sh := sh eg.Go(func() error { - err := sh.SetMode(mode.Disabled) + err := sh.SetMode(ctx, mode.Disabled) if err != nil { - e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled, + e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled, zap.Stringer("id", sh.ID()), zap.Error(err), ) multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err)) + multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err)) multiErrGuard.Unlock() } - err = sh.Close() + err = sh.Close(ctx) if err != nil { - e.log.Error(logs.EngineCouldNotCloseRemovedShard, + e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err)) + multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err)) multiErrGuard.Unlock() } return nil @@ -402,7 +399,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error { // deleteShards deletes shards with specified ids from engine shard list // and releases all engine resources associated with shards. // Returns deleted shards or error if some shard could not be deleted. 
-func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { +func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) { ss := make([]hashedShard, 0, len(ids)) e.mtx.Lock() @@ -424,17 +421,11 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { for _, sh := range ss { idStr := sh.ID().String() - sh.DeleteShardMetrics() + e.metrics.DeleteShardMetrics(idStr) delete(e.shards, idStr) - pool, ok := e.shardPools[idStr] - if ok { - pool.Release() - delete(e.shardPools, idStr) - } - - e.log.Info(logs.EngineShardHasBeenRemoved, + e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } @@ -444,3 +435,48 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { func (s hashedShard) Hash() uint64 { return s.hash } + +func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) { + var err error + var info []shard.Info + prm := shard.ExistsPrm{ + Address: obj, + } + var siErr *objectSDK.SplitInfoError + var ecErr *objectSDK.ECInfoError + + if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { + res, exErr := hs.Exists(ctx, prm) + if exErr != nil { + if client.IsErrObjectAlreadyRemoved(exErr) { + err = new(apistatus.ObjectAlreadyRemoved) + return true + } + + // Check if error is either SplitInfoError or ECInfoError. + // True means the object is virtual. + if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) { + info = append(info, hs.DumpInfo()) + return false + } + + if shard.IsErrObjectExpired(exErr) { + err = exErr + return true + } + + if !client.IsErrObjectNotFound(exErr) { + e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address)) + } + + return false + } + if res.Exists() { + info = append(info, hs.DumpInfo()) + } + return false + }); itErr != nil { + return nil, itErr + } + return info, err +} diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index f4c7a4309..3aa9629b0 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -13,11 +13,10 @@ import ( func TestRemoveShard(t *testing.T) { const numOfShards = 6 - te := testNewEngine(t).setShardsNum(t, numOfShards) + te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t) e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() - require.Equal(t, numOfShards, len(e.shardPools)) require.Equal(t, numOfShards, len(e.shards)) removedNum := numOfShards / 2 @@ -33,11 +32,10 @@ func TestRemoveShard(t *testing.T) { for id, remove := range mSh { if remove { - e.removeShards(id) + e.removeShards(context.Background(), id) } } - require.Equal(t, numOfShards-removedNum, len(e.shardPools)) require.Equal(t, numOfShards-removedNum, len(e.shards)) for id, removed := range mSh { @@ -51,15 +49,15 @@ func TestDisableShards(t *testing.T) { const numOfShards = 2 - te := testNewEngine(t).setShardsNum(t, numOfShards) + te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t) e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() - require.ErrorAs(t, e.DetachShards(ids), new(logicerr.Logical)) - require.ErrorAs(t, e.DetachShards(nil), new(logicerr.Logical)) - require.ErrorAs(t, e.DetachShards([]*shard.ID{}), new(logicerr.Logical)) + require.ErrorAs(t, e.DetachShards(context.Background(), 
ids), new(logicerr.Logical)) + require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical)) + require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical)) - require.NoError(t, e.DetachShards([]*shard.ID{ids[0]})) + require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]})) require.Equal(t, 1, len(e.shards)) } @@ -72,7 +70,7 @@ func TestSortShardsByWeight(t *testing.T) { var shards1 []hashedShard var weights1 []float64 var shards2 []hashedShard - for i := 0; i < numOfShards; i++ { + for i := range numOfShards { shards1 = append(shards1, hashedShard{ hash: uint64(i), }) diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 39122628f..cfd15b4d4 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.opentelemetry.io/otel/attribute" @@ -37,10 +36,9 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, lm, err := lst[index].TreeMove(ctx, d, treeID, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeMove`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return nil, err @@ -71,10 +69,9 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return nil, err } @@ -100,10 +97,36 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeApply`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err, zap.Stringer("cid", cnr), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) + } + return err + } + return nil +} + +// TreeApplyBatch implements the pilorama.Forest interface. 
+func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApplyBatch", + trace.WithAttributes( + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + index, lst, err := e.getTreeShard(ctx, cnr, treeID) + if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { + return err + } + + err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m) + if err != nil { + if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { + e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err, + zap.Stringer("cid", cnr), + zap.String("tree", treeID)) } return err } @@ -132,10 +155,9 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetByPath`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -165,10 +187,9 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetMeta`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -197,10 +218,9 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetChildren`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -210,7 +230,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree } // TreeSortedByFilename implements the pilorama.Forest interface. 
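The new TreeApplyBatch above lets the tree sync path push a whole slice of moves through one call instead of one TreeApply per move. A hedged caller sketch (`incoming`, `storageEngine`, and the surrounding function are hypothetical; the method signature comes from the diff):

```go
// incoming is a hypothetical slice of replicated operations
// ([]pilorama.Move) received from other nodes for one tree.
moves := make([]*pilorama.Move, len(incoming))
for i := range incoming {
	moves[i] = &incoming[i]
}
if err := storageEngine.TreeApplyBatch(ctx, cnr, treeID, moves); err != nil {
	return err // e.g. shard.ErrReadOnlyMode is passed through to the caller
}
```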
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename", trace.WithAttributes( attribute.String("container_id", cid.EncodeToString()), @@ -221,7 +241,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, var err error var nodes []pilorama.MultiNodeInfo - var cursor *string + var cursor *pilorama.Cursor for _, sh := range e.sortShards(cid) { nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) if err != nil { @@ -229,10 +249,9 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeSortedByFilename`", err, + e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -261,10 +280,9 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetOpLog`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -291,10 +309,9 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri break } if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) { - e.reportShardError(sh, "can't perform `TreeDrop`", err, + e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -321,9 +338,8 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, return nil, err } - e.reportShardError(sh, "can't perform `TreeList`", err, - zap.Stringer("cid", cid), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.reportShardError(ctx, sh, "can't perform `TreeList`", err, + zap.Stringer("cid", cid)) // returns as much info about // trees as possible @@ -387,10 +403,9 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height) if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't update tree synchronization height", err, + e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return err } @@ -414,10 +429,9 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't read tree synchronization height", err, + e.reportShardError(ctx, sh, "can't read tree synchronization 
height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go index 2739058e9..ea0a9e74e 100644 --- a/pkg/local_object_storage/engine/tree_test.go +++ b/pkg/local_object_storage/engine/tree_test.go @@ -34,10 +34,10 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1} treeID := "someTree" - for i := 0; i < objCount; i++ { + for i := range objCount { obj := testutil.GenerateObjectWithCID(cid) testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i)) - err := Put(context.Background(), te.ng, obj) + err := Put(context.Background(), te.ng, obj, false) if err != nil { b.Fatal(err) } @@ -50,13 +50,13 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { b.Run("search", func(b *testing.B) { var prm SelectPrm - prm.WithContainerID(cid) + prm.WithContainerID(cid, true) var fs objectSDK.SearchFilters fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual) prm.WithFilters(fs) - for i := 0; i < b.N; i++ { + for range b.N { res, err := te.ng.Select(context.Background(), prm) if err != nil { b.Fatal(err) @@ -67,7 +67,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { } }) b.Run("TreeGetByPath", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true) if err != nil { b.Fatal(err) diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go index da488260a..e9ba3410f 100644 --- a/pkg/local_object_storage/engine/writecache.go +++ b/pkg/local_object_storage/engine/writecache.go @@ -70,6 +70,9 @@ func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePr type SealWriteCachePrm struct { ShardIDs []*shard.ID IgnoreErrors bool + Async bool + RestoreMode bool + Shrink bool } type ShardSealResult struct { @@ -88,6 +91,7 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr trace.WithAttributes( attribute.Int("shard_id_count", len(prm.ShardIDs)), attribute.Bool("ignore_errors", prm.IgnoreErrors), + attribute.Bool("restore_mode", prm.RestoreMode), )) defer span.End() @@ -98,7 +102,6 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr eg, egCtx := errgroup.WithContext(ctx) for _, shardID := range prm.ShardIDs { - shardID := shardID eg.Go(func() error { e.mtx.RLock() sh, ok := e.shards[shardID.String()] @@ -114,7 +117,7 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr return nil } - err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors}) + err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, Async: prm.Async, RestoreMode: prm.RestoreMode, Shrink: prm.Shrink}) resGuard.Lock() defer resGuard.Unlock() @@ -166,18 +169,16 @@ func (m *writeCacheMetrics) Put(d time.Duration, success bool, st writecache.Sto m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Put", success, d) } -func (m *writeCacheMetrics) SetEstimateSize(db, fstree uint64) { - m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeDB.String(), db) - m.metrics.SetEstimateSize(m.shardID, m.path, 
writecache.StorageTypeFSTree.String(), fstree) +func (m *writeCacheMetrics) SetEstimateSize(size uint64) { + m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), size) } func (m *writeCacheMetrics) SetMode(mod mode.ComponentMode) { m.metrics.SetMode(m.shardID, mod.String()) } -func (m *writeCacheMetrics) SetActualCounters(db, fstree uint64) { - m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeDB.String(), db) - m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), fstree) +func (m *writeCacheMetrics) SetActualCounters(count uint64) { + m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), count) } func (m *writeCacheMetrics) Flush(success bool, st writecache.StorageType) { diff --git a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go index 23740868d..6b101fa60 100644 --- a/pkg/local_object_storage/internal/log/log.go +++ b/pkg/local_object_storage/internal/log/log.go @@ -1,14 +1,16 @@ package storagelog import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) // Write writes message about storage engine's operation to logger. -func Write(logger *logger.Logger, fields ...zap.Field) { - logger.Debug(logs.StorageOperation, fields...) +func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) { + logger.Debug(ctx, logs.StorageOperation, fields...) } // AddressField returns logger's field for object address. diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go index 586b3dcc6..d46365296 100644 --- a/pkg/local_object_storage/internal/storagetest/storage.go +++ b/pkg/local_object_storage/internal/storagetest/storage.go @@ -11,9 +11,9 @@ import ( // Component represents single storage component. type Component interface { Open(context.Context, mode.Mode) error - SetMode(mode.Mode) error - Init() error - Close() error + SetMode(context.Context, mode.Mode) error + Init(context.Context) error + Close(context.Context) error } // Constructor constructs storage component. @@ -59,18 +59,18 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) { // Use-case: irrecoverable error on some components, close everything. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) t.Run("RO", func(t *testing.T) { // Use-case: irrecoverable error on some components, close everything. // Open in read-only must be done after the db is here. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.Close()) + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(context.Background(), mode.ReadOnly)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) } @@ -79,9 +79,9 @@ func TestCloseTwice(t *testing.T, cons Constructor) { // Use-case: move to maintenance mode twice, first time failed. 
s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.Close()) - require.NoError(t, s.Close()) // already closed, no-op + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.Close(context.Background())) + require.NoError(t, s.Close(context.Background())) // already closed, no-op } // TestSetMode checks that any mode transition can be done safely. @@ -91,23 +91,23 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) { // call `SetMode` on all not-yet-initialized components. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.SetMode(m)) + require.NoError(t, s.SetMode(context.Background(), m)) t.Run("after open in RO", func(t *testing.T) { - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(context.Background(), mode.ReadOnly)) - require.NoError(t, s.SetMode(m)) + require.NoError(t, s.SetMode(context.Background(), m)) }) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) t.Run("after init", func(t *testing.T) { s := cons(t) // Use-case: normal node operation. require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.SetMode(m)) - require.NoError(t, s.Close()) + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.SetMode(context.Background(), m)) + require.NoError(t, s.Close(context.Background())) }) } @@ -115,8 +115,8 @@ func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) { // Use-case: normal node operation. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.SetMode(from)) - require.NoError(t, s.SetMode(to)) - require.NoError(t, s.Close()) + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.SetMode(context.Background(), from)) + require.NoError(t, s.SetMode(context.Background(), to)) + require.NoError(t, s.Close(context.Background())) } diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go index 383c596af..52b199b0b 100644 --- a/pkg/local_object_storage/internal/testutil/generators.go +++ b/pkg/local_object_storage/internal/testutil/generators.go @@ -1,7 +1,9 @@ package testutil import ( + cryptorand "crypto/rand" "encoding/binary" + "math/rand" "sync/atomic" "testing" @@ -9,7 +11,6 @@ import ( objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" - "golang.org/x/exp/rand" ) // AddressGenerator is the interface of types that generate object addresses.
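To show what the context-threaded Component interface above now demands of implementations, here is a minimal stub that the generic tests could drive (the type itself is hypothetical; only the method set comes from the diff):

```go
// nopComponent satisfies storagetest.Component with no real storage
// behind it; it only records the last mode it was switched to.
type nopComponent struct {
	mode mode.Mode
}

func (c *nopComponent) Open(_ context.Context, m mode.Mode) error    { c.mode = m; return nil }
func (c *nopComponent) SetMode(_ context.Context, m mode.Mode) error { c.mode = m; return nil }
func (c *nopComponent) Init(context.Context) error                   { return nil }
func (c *nopComponent) Close(context.Context) error                  { return nil }

// Plugged into the shared tests via a Constructor, e.g.:
//
//	storagetest.TestCloseTwice(t, func(t *testing.T) storagetest.Component {
//		return &nopComponent{}
//	})
```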
@@ -61,7 +62,7 @@ var _ ObjectGenerator = &SeqObjGenerator{} func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object { data := make([]byte, sz) - _, _ = rand.Read(data) + _, _ = cryptorand.Read(data) obj := GenerateObjectWithCIDWithPayload(cid, data) obj.SetID(oid) return obj @@ -82,7 +83,7 @@ var _ ObjectGenerator = &RandObjGenerator{} func (g *RandObjGenerator) Next() *objectSDK.Object { var id oid.ID - _, _ = rand.Read(id[:]) + _, _ = cryptorand.Read(id[:]) return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize) } diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go index f7be6014d..cc6f726a4 100644 --- a/pkg/local_object_storage/internal/testutil/generators_test.go +++ b/pkg/local_object_storage/internal/testutil/generators_test.go @@ -13,7 +13,7 @@ func TestOverwriteObjGenerator(t *testing.T) { ObjSize: 10, MaxObjects: 4, } - for i := 0; i < 40; i++ { + for range 40 { obj := gen.Next() id, isSet := obj.ID() i := binary.LittleEndian.Uint64(id[:]) @@ -26,7 +26,7 @@ func TestOverwriteObjGenerator(t *testing.T) { func TestRandObjGenerator(t *testing.T) { gen := &RandObjGenerator{ObjSize: 10} - for i := 0; i < 10; i++ { + for range 10 { obj := gen.Next() require.Equal(t, gen.ObjSize, uint64(len(obj.Payload()))) @@ -50,7 +50,7 @@ func TestSeqObjGenerator(t *testing.T) { func TestRandAddrGenerator(t *testing.T) { gen := RandAddrGenerator(5) - for i := 0; i < 50; i++ { + for range 50 { addr := gen.Next() id := addr.Object() k := binary.LittleEndian.Uint64(id[:]) diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go index 60e9211d5..1087e40be 100644 --- a/pkg/local_object_storage/internal/testutil/object.go +++ b/pkg/local_object_storage/internal/testutil/object.go @@ -1,6 +1,7 @@ package testutil import ( + "crypto/rand" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -11,7 +12,6 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" - "golang.org/x/exp/rand" ) const defaultDataSize = 32 diff --git a/pkg/local_object_storage/metabase/VERSION.md b/pkg/local_object_storage/metabase/VERSION.md index 97e514db1..9cfc95332 100644 --- a/pkg/local_object_storage/metabase/VERSION.md +++ b/pkg/local_object_storage/metabase/VERSION.md @@ -2,6 +2,8 @@ This file describes changes between the metabase versions. +Warning: database schema below is outdated and incomplete, see source code. + ## Current ### Primary buckets @@ -86,6 +88,11 @@ This file describes changes between the metabase versions. 
# History +## Version 3 + +- Payload hash, owner ID and FKBT buckets deleted +- Expiration epoch to object ID and object ID to expiration epoch added + ## Version 2 - Container ID is encoded as 32-byte slice diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go new file mode 100644 index 000000000..de1479e6f --- /dev/null +++ b/pkg/local_object_storage/metabase/bucket_cache.go @@ -0,0 +1,82 @@ +package meta + +import ( + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "go.etcd.io/bbolt" +) + +type bucketCache struct { + locked *bbolt.Bucket + graveyard *bbolt.Bucket + garbage *bbolt.Bucket + expired map[cid.ID]*bbolt.Bucket + primary map[cid.ID]*bbolt.Bucket +} + +func newBucketCache() *bucketCache { + return &bucketCache{} +} + +func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(bucketNameLocked) + } + return getBucket(&bc.locked, tx, bucketNameLocked) +} + +func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(graveyardBucketName) + } + return getBucket(&bc.graveyard, tx, graveyardBucketName) +} + +func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(garbageBucketName) + } + return getBucket(&bc.garbage, tx, garbageBucketName) +} + +func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket { + if *cache != nil { + return *cache + } + + *cache = tx.Bucket(name) + return *cache +} + +func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = objectToExpirationEpochBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr) +} + +func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = primaryBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr) +} + +func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { + value, ok := (*m)[cnr] + if ok { + return value + } + + if *m == nil { + *m = make(map[cid.ID]*bbolt.Bucket, 1) + } + + bucketName := make([]byte, bucketKeySize) + bucketName = nameFunc(cnr, bucketName) + (*m)[cnr] = getBucket(&value, tx, bucketName) + return value +} diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go index 472b2affc..da27e6085 100644 --- a/pkg/local_object_storage/metabase/containers.go +++ b/pkg/local_object_storage/metabase/containers.go @@ -56,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) { return result, err } -func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) { +func (db *DB) ContainerSize(id cid.ID) (uint64, error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -64,21 +64,22 @@ func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) { return 0, ErrDegradedMode } - err = db.boltDB.View(func(tx *bbolt.Tx) error { - size, err = db.containerSize(tx, id) + var size uint64 + err := db.boltDB.View(func(tx *bbolt.Tx) error { + size = db.containerSize(tx, id) - return err + return nil }) return size, metaerr.Wrap(err) } -func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) { 
+func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 { containerVolume := tx.Bucket(containerVolumeBucketName) key := make([]byte, cidSize) id.Encode(key) - return parseContainerSize(containerVolume.Get(key)), nil + return parseContainerSize(containerVolume.Get(key)) } func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool { diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go index 5d6788d7e..8d8d91dc7 100644 --- a/pkg/local_object_storage/metabase/containers_test.go +++ b/pkg/local_object_storage/metabase/containers_test.go @@ -18,13 +18,13 @@ func TestDB_Containers(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const N = 10 cids := make(map[string]int, N) - for i := 0; i < N; i++ { + for range N { obj := testutil.GenerateObject() cnr, _ := obj.ContainerID() @@ -67,7 +67,7 @@ func TestDB_Containers(t *testing.T) { assertContains(cnrs, cnr) - require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address())) + require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID())) cnrs, err = db.Containers(context.Background()) require.NoError(t, err) @@ -79,7 +79,7 @@ func TestDB_ContainersCount(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const R, T, SG, L = 10, 11, 12, 13 // amount of object per type @@ -95,7 +95,7 @@ func TestDB_ContainersCount(t *testing.T) { expected := make([]cid.ID, 0, R+T+SG+L) for _, upload := range uploadObjects { - for i := 0; i < upload.amount; i++ { + for range upload.amount { obj := testutil.GenerateObject() obj.SetType(upload.typ) @@ -116,7 +116,7 @@ func TestDB_ContainerSize(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const ( C = 3 @@ -126,11 +126,11 @@ func TestDB_ContainerSize(t *testing.T) { cids := make(map[cid.ID]int, C) objs := make(map[cid.ID][]*objectSDK.Object, C*N) - for i := 0; i < C; i++ { + for range C { cnr := cidtest.ID() cids[cnr] = 0 - for j := 0; j < N; j++ { + for range N { size := rand.Intn(1024) parent := testutil.GenerateObjectWithCID(cnr) @@ -164,7 +164,7 @@ func TestDB_ContainerSize(t *testing.T) { require.NoError(t, metaInhume( db, object.AddressOf(obj), - oidtest.Address(), + oidtest.ID(), )) volume -= int(obj.PayloadSize()) diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 891a1e9b2..c19c65224 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -29,6 +29,7 @@ var ( string(garbageBucketName): {}, string(shardInfoBucket): {}, string(bucketNameLocked): {}, + string(expEpochToObjectBucketName): {}, } // deprecatedBuckets buckets that are not used anymore. @@ -38,7 +39,7 @@ var ( ) // Open boltDB instance for metabase. 
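Aside: the bucketCache introduced above amortizes tx.Bucket lookups across a single read transaction. A hedged usage sketch (`addrs`, `addressKey`, and the surrounding function are hypothetical; newBucketCache and getGraveyardBucket come from the diff):

```go
err := db.boltDB.View(func(tx *bbolt.Tx) error {
	bc := newBucketCache()
	for _, addr := range addrs {
		// First call resolves the bucket via tx.Bucket and memoizes it;
		// subsequent iterations reuse the cached *bbolt.Bucket.
		graveyard := getGraveyardBucket(bc, tx)
		if graveyard == nil {
			return nil // bucket absent: nothing buried yet
		}
		_ = graveyard.Get(addressKey(addr)) // addressKey is a hypothetical key encoder
	}
	return nil
})
```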
-func (db *DB) Open(_ context.Context, m mode.Mode) error { +func (db *DB) Open(ctx context.Context, m mode.Mode) error { db.modeMtx.Lock() defer db.modeMtx.Unlock() db.mode = m @@ -47,16 +48,16 @@ func (db *DB) Open(_ context.Context, m mode.Mode) error { if m.NoMetabase() { return nil } - return db.openDB(m) + return db.openDB(ctx, m) } -func (db *DB) openDB(mode mode.Mode) error { +func (db *DB) openDB(ctx context.Context, mode mode.Mode) error { err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission) if err != nil { - return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) + return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err) } - db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) + db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) if db.boltOptions == nil { opts := *bbolt.DefaultOptions @@ -64,22 +65,22 @@ func (db *DB) openDB(mode mode.Mode) error { } db.boltOptions.ReadOnly = mode.ReadOnly() - return metaerr.Wrap(db.openBolt()) + return metaerr.Wrap(db.openBolt(ctx)) } -func (db *DB) openBolt() error { +func (db *DB) openBolt(ctx context.Context) error { var err error db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions) if err != nil { - return fmt.Errorf("can't open boltDB database: %w", err) + return fmt.Errorf("open boltDB database: %w", err) } db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize - db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase) + db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase) - db.log.Debug(logs.MetabaseCheckingMetabaseVersion) + db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion) return db.boltDB.View(func(tx *bbolt.Tx) error { // The safest way to check if the metabase is fresh is to check if it has no buckets. // However, shard info can be present. So here we check that the number of buckets is @@ -108,7 +109,7 @@ func (db *DB) openBolt() error { // // Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state, // use Reset. -func (db *DB) Init() error { +func (db *DB) Init(_ context.Context) error { return metaerr.Wrap(db.init(false)) } @@ -144,27 +145,27 @@ func (db *DB) init(reset bool) error { if reset { err := tx.DeleteBucket(name) if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { - return fmt.Errorf("could not delete static bucket %s: %w", k, err) + return fmt.Errorf("delete static bucket %s: %w", k, err) } } _, err := tx.CreateBucketIfNotExists(name) if err != nil { - return fmt.Errorf("could not create static bucket %s: %w", k, err) + return fmt.Errorf("create static bucket %s: %w", k, err) } } for _, b := range deprecatedBuckets { err := tx.DeleteBucket(b) if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { - return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err) + return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err) } } if !reset { // counters will be recalculated by refill metabase err = syncCounter(tx, false) if err != nil { - return fmt.Errorf("could not sync object counter: %w", err) + return fmt.Errorf("sync object counter: %w", err) } return nil @@ -204,7 +205,7 @@ func (db *DB) SyncCounters() error { // Close closes boltDB instance // and reports metabase metric. 
-func (db *DB) Close() error { +func (db *DB) Close(context.Context) error { var err error if db.boltDB != nil { err = db.close() @@ -225,7 +226,7 @@ func (db *DB) close() error { // If there was a problem with applying the new configuration, an error is returned. // // If the metabase couldn't be reopened because of an error, ErrDegradedMode is returned. -func (db *DB) Reload(opts ...Option) (bool, error) { +func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) { var c cfg for i := range opts { opts[i](&c) @@ -235,14 +236,14 @@ defer db.modeMtx.Unlock() if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) { - if err := db.Close(); err != nil { + if err := db.Close(ctx); err != nil { return false, err } db.mode = mode.Disabled db.metrics.SetMode(mode.ComponentDisabled) db.info.Path = c.info.Path - if err := db.openBolt(); err != nil { + if err := db.openBolt(ctx); err != nil { return false, metaerr.Wrap(fmt.Errorf("%w: %v", ErrDegradedMode, err)) } diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go index 0354a5eb6..d26402675 100644 --- a/pkg/local_object_storage/metabase/control_test.go +++ b/pkg/local_object_storage/metabase/control_test.go @@ -15,7 +15,7 @@ import ( func TestReset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() err := db.Reset() require.NoError(t, err) @@ -41,7 +41,7 @@ func TestReset(t *testing.T) { err = putBig(db, obj) require.NoError(t, err) - err = metaInhume(db, addrToInhume, oidtest.Address()) + err = metaInhume(db, addrToInhume, oidtest.ID()) require.NoError(t, err) assertExists(addr, true, nil) diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go index 275099ff2..732f99519 100644 --- a/pkg/local_object_storage/metabase/counter.go +++ b/pkg/local_object_storage/metabase/counter.go @@ -238,26 +238,26 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error { } if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil { - return fmt.Errorf("could not increase phy object counter: %w", err) + return fmt.Errorf("increase phy object counter: %w", err) } if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil { - return fmt.Errorf("could not increase logical object counter: %w", err) + return fmt.Errorf("increase logical object counter: %w", err) } if isUserObject { if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil { - return fmt.Errorf("could not increase user object counter: %w", err) + return fmt.Errorf("increase user object counter: %w", err) } } return db.incContainerObjectCounter(tx, cnrID, isUserObject) } -func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool) error { +func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error { b := tx.Bucket(shardInfoBucket) if b == nil { return nil } - return db.updateShardObjectCounterBucket(b, typ, delta, inc) + return db.updateShardObjectCounterBucket(b, typ, delta, false) } func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error { @@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject func syncCounter(tx *bbolt.Tx, force bool) error {
shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket) if err != nil { - return fmt.Errorf("could not get shard info bucket: %w", err) + return fmt.Errorf("get shard info bucket: %w", err) } shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 && len(shardInfoB.Get(objectLogicCounterKey)) == 8 && @@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error { containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName) if err != nil { - return fmt.Errorf("could not get container counter bucket: %w", err) + return fmt.Errorf("get container counter bucket: %w", err) } var addr oid.Address @@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error { return nil }) if err != nil { - return fmt.Errorf("could not iterate objects: %w", err) + return fmt.Errorf("iterate objects: %w", err) } return setObjectCounters(counters, shardInfoB, containerCounterB) @@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container value := containerCounterValue(count) err := containerCounterB.Put(key, value) if err != nil { - return fmt.Errorf("could not update phy container object counter: %w", err) + return fmt.Errorf("update phy container object counter: %w", err) } } phyData := make([]byte, 8) @@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err := shardInfoB.Put(objectPhyCounterKey, phyData) if err != nil { - return fmt.Errorf("could not update phy object counter: %w", err) + return fmt.Errorf("update phy object counter: %w", err) } logData := make([]byte, 8) @@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err = shardInfoB.Put(objectLogicCounterKey, logData) if err != nil { - return fmt.Errorf("could not update logic object counter: %w", err) + return fmt.Errorf("update logic object counter: %w", err) } userData := make([]byte, 8) @@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err = shardInfoB.Put(objectUserCounterKey, userData) if err != nil { - return fmt.Errorf("could not update user object counter: %w", err) + return fmt.Errorf("update user object counter: %w", err) } return nil @@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) { } var cnrID cid.ID if err := cnrID.Decode(buf); err != nil { - return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err) + return cid.ID{}, fmt.Errorf("decode container ID: %w", err) } return cnrID, nil } @@ -654,7 +654,7 @@ func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error { return ErrReadOnlyMode } - err := db.boltDB.Update(func(tx *bbolt.Tx) error { + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { b := tx.Bucket(containerVolumeBucketName) key := make([]byte, cidSize) @@ -737,7 +737,7 @@ func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error { return ErrReadOnlyMode } - err := db.boltDB.Update(func(tx *bbolt.Tx) error { + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { b := tx.Bucket(containerCounterBucketName) key := make([]byte, cidSize) diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index 1797fc0aa..950385a29 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -22,7 +22,7 @@ func TestCounters(t *testing.T) { t.Run("defaults", func(t *testing.T) { t.Parallel() db := newDB(t) - defer 
func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() c, err := db.ObjectCounters() require.NoError(t, err) require.Zero(t, c.Phy) @@ -37,16 +37,16 @@ func TestCounters(t *testing.T) { t.Run("put", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := make([]*objectSDK.Object, 0, objCount) - for i := 0; i < objCount; i++ { + for range objCount { oo = append(oo, testutil.GenerateObject()) } var prm meta.PutPrm exp := make(map[cid.ID]meta.ObjectCounters) - for i := 0; i < objCount; i++ { + for i := range objCount { prm.SetObject(oo[i]) cnrID, _ := oo[i].ContainerID() c := meta.ObjectCounters{} @@ -75,7 +75,7 @@ func TestCounters(t *testing.T) { t.Run("delete", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, false) exp := make(map[cid.ID]meta.ObjectCounters) @@ -120,7 +120,7 @@ func TestCounters(t *testing.T) { t.Run("inhume", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, false) exp := make(map[cid.ID]meta.ObjectCounters) @@ -156,13 +156,18 @@ func TestCounters(t *testing.T) { } var prm meta.InhumePrm - prm.SetTombstoneAddress(oidtest.Address()) - prm.SetAddresses(inhumedObjs...) + for _, o := range inhumedObjs { + tombAddr := oidtest.Address() + tombAddr.SetContainer(o.Container()) - res, err := db.Inhume(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(len(inhumedObjs)), res.LogicInhumed()) - require.Equal(t, uint64(len(inhumedObjs)), res.UserInhumed()) + prm.SetTombstoneAddress(tombAddr) + prm.SetAddresses(o) + + res, err := db.Inhume(context.Background(), prm) + require.NoError(t, err) + require.Equal(t, uint64(1), res.LogicInhumed()) + require.Equal(t, uint64(1), res.UserInhumed()) + } c, err := db.ObjectCounters() require.NoError(t, err) @@ -180,14 +185,14 @@ func TestCounters(t *testing.T) { t.Run("put_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() parObj := testutil.GenerateObject() exp := make(map[cid.ID]meta.ObjectCounters) // put objects and check that parent info // does not affect the counter - for i := 0; i < objCount; i++ { + for i := range objCount { o := testutil.GenerateObject() if i < objCount/2 { // half of the objs will have the parent o.SetParent(parObj) @@ -218,7 +223,7 @@ func TestCounters(t *testing.T) { t.Run("delete_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, true) exp := make(map[cid.ID]meta.ObjectCounters) @@ -260,7 +265,7 @@ func TestCounters(t *testing.T) { t.Run("inhume_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, true) exp := make(map[cid.ID]meta.ObjectCounters) @@ -296,11 +301,16 @@ func TestCounters(t *testing.T) { } var prm meta.InhumePrm - 
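The test updates above replace classic counting loops with the range-over-int form introduced in Go 1.22, consistent with the toolchain bump at the top of this patch. Both spellings used in the tests:

```go
package main

import "fmt"

func main() {
	sum := 0
	for i := range 5 { // i takes 0..4 — replaces `for i := 0; i < 5; i++`
		sum += i
	}
	for range 3 { // index unused — replaces `for i := 0; i < 3; i++`
		sum++
	}
	fmt.Println(sum) // 13
}
```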
prm.SetTombstoneAddress(oidtest.Address()) - prm.SetAddresses(inhumedObjs...) + for _, o := range inhumedObjs { + tombAddr := oidtest.Address() + tombAddr.SetContainer(o.Container()) - _, err := db.Inhume(context.Background(), prm) - require.NoError(t, err) + prm.SetTombstoneAddress(tombAddr) + prm.SetAddresses(o) + + _, err := db.Inhume(context.Background(), prm) + require.NoError(t, err) + } c, err := db.ObjectCounters() require.NoError(t, err) @@ -319,7 +329,7 @@ func TestCounters(t *testing.T) { func TestDoublePut(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() obj := testutil.GenerateObject() exp := make(map[cid.ID]meta.ObjectCounters) @@ -377,7 +387,7 @@ func TestCounters_Expired(t *testing.T) { es := &epochState{epoch} db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := make([]oid.Address, objCount) for i := range oo { @@ -535,7 +545,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK parent := testutil.GenerateObject() oo := make([]*objectSDK.Object, 0, count) - for i := 0; i < count; i++ { + for i := range count { o := testutil.GenerateObject() if withParent { o.SetParent(parent) diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go index 1f444a3ef..4474aa229 100644 --- a/pkg/local_object_storage/metabase/db.go +++ b/pkg/local_object_storage/metabase/db.go @@ -11,9 +11,9 @@ import ( "sync" "time" - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/mr-tron/base58" "go.etcd.io/bbolt" @@ -70,7 +70,7 @@ func defaultCfg() *cfg { }, boltBatchDelay: bbolt.DefaultMaxBatchDelay, boltBatchSize: bbolt.DefaultMaxBatchSize, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), metrics: &noopMetrics{}, } } diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go index 01e1ed2bc..edaeb13c5 100644 --- a/pkg/local_object_storage/metabase/db_test.go +++ b/pkg/local_object_storage/metabase/db_test.go @@ -6,10 +6,10 @@ import ( "strconv" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -32,7 +32,17 @@ func putBig(db *meta.DB, obj *objectSDK.Object) error { } func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) { - res, err := metaSelect(db, cnr, fs) + res, err := metaSelect(db, cnr, fs, false) + require.NoError(t, err) + require.Len(t, res, len(exp)) + + for i := range exp { + require.Contains(t, res, exp[i]) + } +} + +func testSelect2(t 
*testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) { + res, err := metaSelect(db, cnr, fs, useAttrIndex) require.NoError(t, err) require.Len(t, res, len(exp)) @@ -51,7 +61,7 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB { ) require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) return bdb } diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index ae10564a8..9a5a6e574 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -77,8 +77,6 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) { type referenceNumber struct { all, cur int - addr oid.Address - obj *objectSDK.Object } @@ -112,14 +110,14 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { var err error var res DeleteRes - err = db.boltDB.Update(func(tx *bbolt.Tx) error { + err = db.boltDB.Batch(func(tx *bbolt.Tx) error { res, err = db.deleteGroup(tx, prm.addrs) return err }) if err == nil { deleted = true for i := range prm.addrs { - storagelog.Write(db.log, + storagelog.Write(ctx, db.log, storagelog.AddressField(prm.addrs[i]), storagelog.OpField("metabase DELETE")) } @@ -163,28 +161,28 @@ func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error) func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error { if res.phyCount > 0 { - err := db.updateShardObjectCounter(tx, phy, res.phyCount, false) + err := db.decShardObjectCounter(tx, phy, res.phyCount) if err != nil { - return fmt.Errorf("could not decrease phy object counter: %w", err) + return fmt.Errorf("decrease phy object counter: %w", err) } } if res.logicCount > 0 { - err := db.updateShardObjectCounter(tx, logical, res.logicCount, false) + err := db.decShardObjectCounter(tx, logical, res.logicCount) if err != nil { - return fmt.Errorf("could not decrease logical object counter: %w", err) + return fmt.Errorf("decrease logical object counter: %w", err) } } if res.userCount > 0 { - err := db.updateShardObjectCounter(tx, user, res.userCount, false) + err := db.decShardObjectCounter(tx, user, res.userCount) if err != nil { - return fmt.Errorf("could not decrease user object counter: %w", err) + return fmt.Errorf("decrease user object counter: %w", err) } } if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil { - return fmt.Errorf("could not decrease container object counter: %w", err) + return fmt.Errorf("decrease container object counter: %w", err) } return nil } @@ -261,7 +259,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter if garbageBKT != nil { err := garbageBKT.Delete(addrKey) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err) } } return deleteSingleResult{}, nil @@ -282,7 +280,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter if garbageBKT != nil { err := garbageBKT.Delete(addrKey) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err) } } @@ -295,9 +293,8 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter nRef, ok := refCounter[k] if !ok { nRef = 
&referenceNumber{ - all: parentLength(tx, parAddr), - addr: parAddr, - obj: parent, + all: parentLength(tx, parAddr), + obj: parent, } refCounter[k] = nRef @@ -311,7 +308,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter // remove object err = db.deleteObject(tx, obj, false) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove object: %w", err) } if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil { @@ -338,12 +335,12 @@ func (db *DB) deleteObject( err = updateListIndexes(tx, obj, delListIndexItem) if err != nil { - return fmt.Errorf("can't remove list indexes: %w", err) + return fmt.Errorf("remove list indexes: %w", err) } err = updateFKBTIndexes(tx, obj, delFKBTIndexItem) if err != nil { - return fmt.Errorf("can't remove fake bucket tree indexes: %w", err) + return fmt.Errorf("remove fake bucket tree indexes: %w", err) } if isParent { @@ -354,7 +351,7 @@ func (db *DB) deleteObject( addrKey := addressKey(object.AddressOf(obj), key) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove from garbage bucket: %w", err) + return fmt.Errorf("remove from garbage bucket: %w", err) } } } @@ -366,12 +363,12 @@ func (db *DB) deleteObject( func parentLength(tx *bbolt.Tx, addr oid.Address) int { bucketName := make([]byte, bucketKeySize) - bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:])) + bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName)) if bkt == nil { return 0 } - lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:]))) + lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName))) if err != nil { return 0 } @@ -379,25 +376,11 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int { return len(lst) } -func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) { +func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt := tx.Bucket(item.name) if bkt != nil { - _ = bkt.Delete(item.key) // ignore error, best effort there + return bkt.Delete(item.key) } -} - -func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { - bkt := tx.Bucket(item.name) - if bkt == nil { - return nil - } - - fkbtRoot := bkt.Bucket(item.key) - if fkbtRoot == nil { - return nil - } - - _ = fkbtRoot.Delete(item.val) // ignore error, best effort there return nil } @@ -423,19 +406,56 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { // if list empty, remove the key from bucket if len(lst) == 0 { - _ = bkt.Delete(item.key) // ignore error, best effort there - - return nil + return bkt.Delete(item.key) } // if list is not empty, then update it encodedLst, err := encodeList(lst) if err != nil { - return nil // ignore error, best effort there + return err } - _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there - return nil + return bkt.Put(item.key, encodedLst) +} + +func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { + bkt := tx.Bucket(item.name) + if bkt == nil { + return nil + } + + fkbtRoot := bkt.Bucket(item.key) + if fkbtRoot == nil { + return nil + } + + if err := fkbtRoot.Delete(item.val); err != nil { + return err + } + + if hasAnyItem(fkbtRoot) { + return nil + } + + if err := bkt.DeleteBucket(item.key); err != nil { + return err + } + + if hasAnyItem(bkt) { + return nil + } + + return tx.DeleteBucket(item.name) +} + +func hasAnyItem(b *bbolt.Bucket) bool { + var hasAnyItem bool + c := b.Cursor() 
+ for k, _ := c.First(); k != nil; { + hasAnyItem = true + break + } + return hasAnyItem } func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error { @@ -458,25 +478,48 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error return ErrUnknownObjectType } - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: bucketName, key: objKey, - }) + }); err != nil { + return err + } } else { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: parentBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } } - delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index name: smallBucketName(cnr, bucketName), key: objKey, - }) - delUniqueIndexItem(tx, namedBucketItem{ // remove from root index + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index name: rootBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } + + if expEpoch, ok := hasExpirationEpoch(obj); ok { + if err := delUniqueIndexItem(tx, namedBucketItem{ + name: expEpochToObjectBucketName, + key: expirationEpochKey(expEpoch, cnr, addr.Object()), + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ + name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)), + key: objKey, + }); err != nil { + return err + } + } return nil } @@ -496,16 +539,18 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize)) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err) + return fmt.Errorf("remove EC parent from garbage bucket: %w", err) } } // also drop EC parent root info if current EC chunk is the last one if !hasAnyChunks { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - }) + }); err != nil { + return err + } } if ech.ParentSplitParentID() == nil { @@ -534,16 +579,15 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. 
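Two behavioral changes stand out in the deletion hunks above: index removals (`delUniqueIndexItem`, `delListIndexItem`, `delFKBTIndexItem`) now propagate bbolt errors instead of swallowing them "best effort", and `delFKBTIndexItem` prunes attribute buckets that become empty. A compilable sketch of that pruning shape, with illustrative bucket/key names:

```go
package main

import "go.etcd.io/bbolt"

// pruneFKBT deletes one value from a nested attribute bucket and then
// removes the enclosing buckets if nothing is left in them, mirroring
// the delFKBTIndexItem/hasAnyItem logic above. Names are illustrative.
func pruneFKBT(tx *bbolt.Tx, name, key, val []byte) error {
	bkt := tx.Bucket(name)
	if bkt == nil {
		return nil
	}
	root := bkt.Bucket(key)
	if root == nil {
		return nil
	}
	if err := root.Delete(val); err != nil {
		return err // errors are no longer ignored
	}
	if k, _ := root.Cursor().First(); k != nil {
		return nil // still referenced by other objects
	}
	if err := bkt.DeleteBucket(key); err != nil {
		return err
	}
	if k, _ := bkt.Cursor().First(); k != nil {
		return nil
	}
	return tx.DeleteBucket(name) // whole attribute index is empty
}

func main() {}
```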
addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize)) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err) + return fmt.Errorf("remove EC parent from garbage bucket: %w", err) } } // drop split info - delUniqueIndexItem(tx, namedBucketItem{ + return delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)), }) - return nil } func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool { diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go index 0e627f095..884da23ff 100644 --- a/pkg/local_object_storage/metabase/delete_ec_test.go +++ b/pkg/local_object_storage/metabase/delete_ec_test.go @@ -30,8 +30,8 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunk := oidtest.ID() @@ -39,7 +39,6 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) { tombstoneID := oidtest.ID() chunkObj := testutil.GenerateObjectWithCID(cnr) - chunkObj.SetContainerID(cnr) chunkObj.SetID(ecChunk) chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) chunkObj.SetPayloadSize(uint64(10)) @@ -131,17 +130,9 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) { require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm)) require.Equal(t, 2, len(tombstonedObjects)) - var tombstones []oid.Address - for _, tss := range tombstonedObjects { - tombstones = append(tombstones, tss.tomb) - } - inhumePrm.SetAddresses(tombstones...) - inhumePrm.SetGCMark() - _, err = db.Inhume(context.Background(), inhumePrm) + _, err = db.InhumeTombstones(context.Background(), tombstonedObjects) require.NoError(t, err) - require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects)) - // GC finds tombstone as garbage and deletes it garbageAddresses = nil @@ -195,8 +186,8 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunks := make([]oid.ID, chunksCount) @@ -375,17 +366,9 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm)) require.True(t, len(tombstonedObjects) == parentCount+chunksCount) - var tombstones []oid.Address - for _, tss := range tombstonedObjects { - tombstones = append(tombstones, tss.tomb) - } - inhumePrm.SetAddresses(tombstones...) 
- inhumePrm.SetGCMark() - _, err = db.Inhume(context.Background(), inhumePrm) + _, err = db.InhumeTombstones(context.Background(), tombstonedObjects) require.NoError(t, err) - require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects)) - // GC finds tombstone as garbage and deletes it garbageAddresses = nil @@ -418,7 +401,8 @@ func testVerifyNoObjectDataLeft(tx *bbolt.Tx) error { return tx.ForEach(func(name []byte, b *bbolt.Bucket) error { if bytes.Equal(name, shardInfoBucket) || bytes.Equal(name, containerCounterBucketName) || - bytes.Equal(name, containerVolumeBucketName) { + bytes.Equal(name, containerVolumeBucketName) || + bytes.Equal(name, expEpochToObjectBucketName) { return nil } return testBucketEmpty(name, b) diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go new file mode 100644 index 000000000..0329e3a73 --- /dev/null +++ b/pkg/local_object_storage/metabase/delete_meta_test.go @@ -0,0 +1,85 @@ +package meta + +import ( + "bytes" + "context" + "path/filepath" + "testing" + + objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" +) + +func TestPutDeleteIndexAttributes(t *testing.T) { + db := New([]Option{ + WithPath(filepath.Join(t.TempDir(), "metabase")), + WithPermissions(0o600), + WithEpochState(epochState{}), + }...) + + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() + + cnr := cidtest.ID() + obj1 := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name") + testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object") + + var putPrm PutPrm + putPrm.SetObject(obj1) + + _, err := db.Put(context.Background(), putPrm) + require.NoError(t, err) + + require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) + require.Nil(t, b) + b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) + require.Nil(t, b) + return nil + })) + + obj2 := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name") + testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object") + + putPrm.SetObject(obj2) + putPrm.SetIndexAttributes(true) + + _, err = db.Put(context.Background(), putPrm) + require.NoError(t, err) + + objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize)) + require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) + require.NotNil(t, b) + b = b.Bucket([]byte("CRDT-Name")) + require.NotNil(t, b) + require.True(t, bytes.Equal(zeroValue, b.Get(objKey))) + b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) + require.NotNil(t, b) + b = b.Bucket([]byte("/path/to/object")) + require.NotNil(t, b) + require.True(t, bytes.Equal(zeroValue, 
b.Get(objKey))) + return nil + })) + + var dPrm DeletePrm + dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2)) + _, err = db.Delete(context.Background(), dPrm) + require.NoError(t, err) + + require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) + require.Nil(t, b) + b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) + require.Nil(t, b) + return nil + })) +} diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go index 2053874d0..c0762a377 100644 --- a/pkg/local_object_storage/metabase/delete_test.go +++ b/pkg/local_object_storage/metabase/delete_test.go @@ -18,7 +18,7 @@ import ( func TestDB_Delete(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() parent := testutil.GenerateObjectWithCID(cnr) @@ -40,12 +40,12 @@ func TestDB_Delete(t *testing.T) { // inhume parent and child so they will be on graveyard ts := testutil.GenerateObjectWithCID(cnr) - err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts)) + err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object()) require.NoError(t, err) ts = testutil.GenerateObjectWithCID(cnr) - err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts)) + err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object()) require.NoError(t, err) // delete object @@ -65,7 +65,7 @@ func TestDB_Delete(t *testing.T) { func TestDeleteAllChildren(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -103,12 +103,12 @@ func TestDeleteAllChildren(t *testing.T) { func TestGraveOnlyDelete(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() addr := oidtest.Address() // inhume non-existent object by address - require.NoError(t, metaInhume(db, addr, oidtest.Address())) + require.NoError(t, metaInhume(db, addr, oidtest.ID())) // delete the object data require.NoError(t, metaDelete(db, addr)) @@ -116,7 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) { func TestExpiredObject(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { // removing expired object should be error-free @@ -128,10 +128,10 @@ func TestExpiredObject(t *testing.T) { func TestDelete(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() - for i := 0; i < 10; i++ { + for range 10 { obj := testutil.GenerateObjectWithCID(cnr) var prm meta.PutPrm @@ -170,7 +170,7 @@ func TestDelete(t *testing.T) { func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() addr := oidtest.Address() diff --git a/pkg/local_object_storage/metabase/exists.go 
b/pkg/local_object_storage/metabase/exists.go index 153d92110..7bd6f90a6 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -20,8 +19,8 @@ import ( // ExistsPrm groups the parameters of Exists operation. type ExistsPrm struct { - addr oid.Address - paddr oid.Address + addr oid.Address + ecParentAddr oid.Address } // ExistsRes groups the resulting values of Exists operation. @@ -37,9 +36,9 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) { p.addr = addr } -// SetParent is an Exists option to set objects parent. -func (p *ExistsPrm) SetParent(addr oid.Address) { - p.paddr = addr +// SetECParent is an Exists option to set the object's EC parent. +func (p *ExistsPrm) SetECParent(addr oid.Address) { + p.ecParentAddr = addr } // Exists returns the fact that the object is in the metabase. @@ -82,7 +81,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err currEpoch := db.epochState.CurrentEpoch() err = db.boltDB.View(func(tx *bbolt.Tx) error { - res.exists, res.locked, err = db.exists(tx, prm.addr, prm.paddr, currEpoch) + res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch) return err }) @@ -90,13 +89,28 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err return res, metaerr.Wrap(err) } -func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpoch uint64) (bool, bool, error) { +func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) { var locked bool - if !parent.Equals(oid.Address{}) { - locked = objectLocked(tx, parent.Container(), parent.Object()) + if !ecParent.Equals(oid.Address{}) { + st, err := objectStatus(tx, ecParent, currEpoch) + if err != nil { + return false, false, err + } + switch st { + case 2: + return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved)) + case 3: + return false, locked, ErrObjectIsExpired + } + + locked = objectLocked(tx, ecParent.Container(), ecParent.Object()) } // check graveyard and object expiration first - switch objectStatus(tx, addr, currEpoch) { + st, err := objectStatus(tx, addr, currEpoch) + if err != nil { + return false, false, err + } + switch st { case 1: return false, locked, logicerr.Wrap(new(apistatus.ObjectNotFound)) case 2: @@ -138,30 +152,29 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpo // - 1 if object with GC mark; // - 2 if object is covered with tombstone; // - 3 if object is expired.
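`Exists` now runs the same status check against an EC chunk's parent before checking the chunk itself, so a chunk whose parent is tombstoned or expired fails fast; `objectStatus` also returns an error now that expiration lookups can fail. A small sketch of how callers translate the status codes listed above (0 = available, 1 = GC mark, 2 = tombstoned, 3 = expired) into typed errors; the sentinel errors here are stand-ins for the real `apistatus`/`logicerr` wrappers:

```go
package main

import (
	"errors"
	"fmt"
)

// Assumed stand-ins for apistatus.ObjectNotFound,
// apistatus.ObjectAlreadyRemoved and ErrObjectIsExpired.
var (
	errNotFound       = errors.New("object not found")
	errAlreadyRemoved = errors.New("object already removed")
	errExpired        = errors.New("object is expired")
)

// statusToError mirrors the switch in db.exists: a non-zero status
// becomes an error, status 0 lets the lookup proceed.
func statusToError(st uint8) error {
	switch st {
	case 1:
		return errNotFound
	case 2:
		return errAlreadyRemoved
	case 3:
		return errExpired
	default:
		return nil
	}
}

func main() {
	fmt.Println(statusToError(2)) // object already removed
}
```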
-func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) uint8 { +func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { + return objectStatusWithCache(nil, tx, addr, currEpoch) +} + +func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { // locked object could not be removed/marked with GC/expired - if objectLocked(tx, addr.Container(), addr.Object()) { - return 0 + if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) { + return 0, nil } - // we check only if the object is expired in the current - // epoch since it is considered the only corner case: the - // GC is expected to collect all the objects that have - // expired previously for less than the one epoch duration - - expired := isExpiredWithAttribute(tx, objectV2.SysAttributeExpEpoch, addr, currEpoch) - if !expired { - expired = isExpiredWithAttribute(tx, objectV2.SysAttributeExpEpochNeoFS, addr, currEpoch) + expired, err := isExpiredWithCache(bc, tx, addr, currEpoch) + if err != nil { + return 0, err } if expired { - return 3 + return 3, nil } - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + graveyardBkt := getGraveyardBucket(bc, tx) + garbageBkt := getGarbageBucket(bc, tx) addrKey := addressKey(addr, make([]byte, addressKeySize)) - return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt) + return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil } func inGraveyardWithKey(addrKey []byte, graveyard, garbageBCK *bbolt.Bucket) uint8 { @@ -219,7 +232,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e err := splitInfo.Unmarshal(rawSplitInfo) if err != nil { - return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err) + return nil, fmt.Errorf("unmarshal split info from root index: %w", err) } return splitInfo, nil diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go index 0087c1e31..3045e17f1 100644 --- a/pkg/local_object_storage/metabase/exists_test.go +++ b/pkg/local_object_storage/metabase/exists_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "errors" "testing" @@ -18,7 +19,7 @@ const currEpoch = 1000 func TestDB_Exists(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() t.Run("no object", func(t *testing.T) { nonExist := testutil.GenerateObject() @@ -37,7 +38,7 @@ func TestDB_Exists(t *testing.T) { require.True(t, exists) t.Run("removed object", func(t *testing.T) { - err := metaInhume(db, object.AddressOf(regular), oidtest.Address()) + err := metaInhume(db, object.AddressOf(regular), oidtest.ID()) require.NoError(t, err) exists, err := metaExists(db, object.AddressOf(regular)) diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go index aa2cb6f20..a1351cb6f 100644 --- a/pkg/local_object_storage/metabase/expired.go +++ b/pkg/local_object_storage/metabase/expired.go @@ -2,12 +2,11 @@ package meta import ( "context" + "encoding/binary" "errors" - "fmt" "strconv" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" 
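`objectStatus` gains a `bucketCache`-aware variant so the repeated checks inside a single `View` transaction (lock status, expiration, graveyard, garbage) don't re-resolve the same buckets. The `bucketCache` type itself is outside this hunk; a minimal sketch of the memoization pattern it implies, assuming it lazily caches bucket handles per transaction (bucket name strings are illustrative):

```go
package main

import "go.etcd.io/bbolt"

// Illustrative bucket names; the real values live elsewhere in the package.
var (
	graveyardBucketName = []byte("Graveyard")
	garbageBucketName   = []byte("Garbage")
)

// bucketCache is assumed to be a per-transaction memo of bucket
// handles; it must not outlive the bbolt transaction it was built in.
type bucketCache struct {
	graveyard, garbage *bbolt.Bucket
}

func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
	if bc == nil { // nil cache falls back to a direct lookup
		return tx.Bucket(graveyardBucketName)
	}
	if bc.graveyard == nil {
		bc.graveyard = tx.Bucket(graveyardBucketName)
	}
	return bc.graveyard
}

func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
	if bc == nil {
		return tx.Bucket(garbageBucketName)
	}
	if bc.garbage == nil {
		bc.garbage = tx.Bucket(garbageBucketName)
	}
	return bc.garbage
}

func main() {}
```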
@@ -17,6 +16,8 @@ import ( "go.opentelemetry.io/otel/trace" ) +var errInvalidEpochValueLength = errors.New("could not parse expiration epoch: invalid data length") + // FilterExpired return expired items from addresses. // Address considered expired if metabase does contain information about expiration and // expiration epoch is less than epoch. @@ -57,29 +58,11 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A default: } - expiredNeoFS, err := selectExpiredObjectIDs(tx, objectV2.SysAttributeExpEpochNeoFS, epoch, containerID, objectIDs) + expired, err := selectExpiredObjects(tx, epoch, containerID, objectIDs) if err != nil { return err } - - expiredSys, err := selectExpiredObjectIDs(tx, objectV2.SysAttributeExpEpoch, epoch, containerID, objectIDs) - if err != nil { - return err - } - - for _, o := range expiredNeoFS { - var a oid.Address - a.SetContainer(containerID) - a.SetObject(o) - result = append(result, a) - } - - for _, o := range expiredSys { - var a oid.Address - a.SetContainer(containerID) - a.SetObject(o) - result = append(result, a) - } + result = append(result, expired...) } return nil }) @@ -90,76 +73,41 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A return result, nil } -func isExpiredWithAttribute(tx *bbolt.Tx, attr string, addr oid.Address, currEpoch uint64) bool { - // bucket with objects that have expiration attr - attrKey := make([]byte, bucketKeySize+len(attr)) - expirationBucket := tx.Bucket(attributeBucketName(addr.Container(), attr, attrKey)) - if expirationBucket != nil { - // bucket that contains objects that expire in the current epoch - prevEpochBkt := expirationBucket.Bucket([]byte(strconv.FormatUint(currEpoch-1, 10))) - if prevEpochBkt != nil { - rawOID := objectKey(addr.Object(), make([]byte, objectKeySize)) - if prevEpochBkt.Get(rawOID) != nil { - return true - } - } - } - - return false +func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { + return isExpiredWithCache(nil, tx, addr, currEpoch) } -func selectExpiredObjectIDs(tx *bbolt.Tx, attr string, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.ID, error) { - result := make([]oid.ID, 0) - notResolved := make(map[oid.ID]struct{}) - for _, oid := range objectIDs { - notResolved[oid] = struct{}{} +func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { + b := getExpiredBucket(bc, tx, addr.Container()) + if b == nil { + return false, nil } - - expiredBuffer := make([]oid.ID, 0) - objectKeyBuffer := make([]byte, objectKeySize) - - expirationBucketKey := make([]byte, bucketKeySize+len(attr)) - expirationBucket := tx.Bucket(attributeBucketName(containerID, attr, expirationBucketKey)) - if expirationBucket == nil { - return result, nil // all not expired + key := make([]byte, objectKeySize) + addr.Object().Encode(key) + val := b.Get(key) + if len(val) == 0 { + return false, nil } + if len(val) != epochSize { + return false, errInvalidEpochValueLength + } + expEpoch := binary.LittleEndian.Uint64(val) + return expEpoch < currEpoch, nil +} - err := expirationBucket.ForEach(func(epochExpBucketKey, _ []byte) error { - bucketExpiresAfter, err := strconv.ParseUint(string(epochExpBucketKey), 10, 64) +func selectExpiredObjects(tx *bbolt.Tx, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.Address, error) { + result := make([]oid.Address, 0) + var addr oid.Address + addr.SetContainer(containerID) + for _, objID := range objectIDs { + 
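The expiration rework above replaces scanning per-epoch attribute buckets with a direct per-object lookup: the stored value is a fixed-width expiration epoch, length-checked against `epochSize` and decoded with `binary.LittleEndian.Uint64`. A self-contained sketch of that value format, assuming `epochSize` is 8:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// epochSize is assumed to be 8: expiration epochs are stored as
// fixed-width little-endian uint64 values, one per object key.
const epochSize = 8

func encodeEpoch(e uint64) []byte {
	buf := make([]byte, epochSize)
	binary.LittleEndian.PutUint64(buf, e)
	return buf
}

func decodeEpoch(val []byte) (uint64, error) {
	if len(val) != epochSize {
		return 0, errors.New("could not parse expiration epoch: invalid data length")
	}
	return binary.LittleEndian.Uint64(val), nil
}

func main() {
	exp, err := decodeEpoch(encodeEpoch(1000))
	currEpoch := uint64(1001)
	fmt.Println(exp, err, exp < currEpoch) // 1000 <nil> true — expired
}
```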
addr.SetObject(objID) + expired, err := isExpired(tx, addr, epoch) if err != nil { - return fmt.Errorf("could not parse expiration epoch: %w", err) - } else if bucketExpiresAfter >= epoch { - return nil + return nil, err } - - epochExpirationBucket := expirationBucket.Bucket(epochExpBucketKey) - if epochExpirationBucket == nil { - return nil + if expired { + result = append(result, addr) } - - expiredBuffer = expiredBuffer[:0] - for oid := range notResolved { - key := objectKey(oid, objectKeyBuffer) - if epochExpirationBucket.Get(key) != nil { - expiredBuffer = append(expiredBuffer, oid) - } - } - - for _, oid := range expiredBuffer { - delete(notResolved, oid) - result = append(result, oid) - } - - if len(notResolved) == 0 { - return errBreakBucketForEach - } - - return nil - }) - - if err != nil && !errors.Is(err, errBreakBucketForEach) { - return nil, err } - return result, nil } diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go index bb98745ee..495c1eee7 100644 --- a/pkg/local_object_storage/metabase/expired_test.go +++ b/pkg/local_object_storage/metabase/expired_test.go @@ -13,7 +13,7 @@ import ( func TestDB_SelectExpired(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() containerID1 := cidtest.ID() diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index d9acd4ce2..821810c09 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -88,8 +88,16 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { } func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { + return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch) +} + +func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { if checkStatus { - switch objectStatus(tx, addr, currEpoch) { + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) + if err != nil { + return nil, err + } + switch st { case 1: return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) case 2: @@ -105,12 +113,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b bucketName := make([]byte, bucketKeySize) // check in primary index - data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key) - if len(data) != 0 { - return obj, obj.Unmarshal(data) + if b := getPrimaryBucket(bc, tx, cnr); b != nil { + if data := b.Get(key); len(data) != 0 { + return obj, obj.Unmarshal(data) + } } - data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) + data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) if len(data) != 0 { return nil, getECInfoError(tx, cnr, data) } @@ -160,17 +169,29 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) } - // pick last item, for now there is not difference which address to pick - // but later list might be sorted so first or last value can be more - // prioritized to choose - virtualOID := relativeLst[len(relativeLst)-1] - data := getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID) + var data []byte + for i := 0; i < len(relativeLst) && len(data) == 0; i++ { + virtualOID := relativeLst[len(relativeLst)-i-1] + 
data = getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID) + } + + if len(data) == 0 { + // check if any of the relatives is an EC object + for _, relative := range relativeLst { + data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), relative) + if len(data) > 0 { + // we can't return object headers, but can return error, + // so assembler can try to assemble complex object + return nil, getSplitInfoError(tx, cnr, key) + } + } + } child := objectSDK.New() err = child.Unmarshal(data) if err != nil { - return nil, fmt.Errorf("can't unmarshal child with parent: %w", err) + return nil, fmt.Errorf("unmarshal child with parent: %w", err) } par := child.Parent() @@ -199,10 +220,10 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error { ecInfo := objectSDK.NewECInfo() for _, key := range keys { // check in primary index - ojbData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) - if len(ojbData) != 0 { + objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) + if len(objData) != 0 { obj := objectSDK.New() - if err := obj.Unmarshal(ojbData); err != nil { + if err := obj.Unmarshal(objData); err != nil { return err } chunk := objectSDK.ECChunk{} diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index 247ddf9cd..98c428410 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -25,7 +25,7 @@ import ( func TestDB_Get(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw := testutil.GenerateObject() @@ -150,9 +150,8 @@ func TestDB_Get(t *testing.T) { t.Run("get removed object", func(t *testing.T) { obj := oidtest.Address() - ts := oidtest.Address() - require.NoError(t, metaInhume(db, obj, ts)) + require.NoError(t, metaInhume(db, obj, oidtest.ID())) _, err := metaGet(db, obj, false) require.True(t, client.IsErrObjectAlreadyRemoved(err)) @@ -220,10 +219,9 @@ func benchmarkGet(b *testing.B, numOfObj int) { meta.WithMaxBatchSize(batchSize), meta.WithMaxBatchDelay(10*time.Millisecond), ) - defer func() { require.NoError(b, db.Close()) }() addrs := make([]oid.Address, 0, numOfObj) - for i := 0; i < numOfObj; i++ { + for range numOfObj { raw := testutil.GenerateObject() addrs = append(addrs, object.AddressOf(raw)) @@ -235,6 +233,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { } db, addrs := prepareDb(runtime.NumCPU()) + defer func() { require.NoError(b, db.Close(context.Background())) }() b.Run("parallel", func(b *testing.B) { b.ReportAllocs() @@ -254,14 +253,14 @@ func benchmarkGet(b *testing.B, numOfObj int) { }) }) - require.NoError(b, db.Close()) + require.NoError(b, db.Close(context.Background())) require.NoError(b, os.RemoveAll(b.Name())) db, addrs = prepareDb(1) b.Run("serial", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for i := range b.N { var getPrm meta.GetPrm getPrm.SetAddress(addrs[i%len(addrs)]) diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go index 80d40fb78..2f23d424c 100644 --- a/pkg/local_object_storage/metabase/graveyard.go +++ b/pkg/local_object_storage/metabase/graveyard.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + 
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" ) @@ -176,7 +177,7 @@ type gcHandler struct { func (g gcHandler) handleKV(k, _ []byte) error { o, err := garbageFromKV(k) if err != nil { - return fmt.Errorf("could not parse garbage object: %w", err) + return fmt.Errorf("parse garbage object: %w", err) } return g.h(o) @@ -189,7 +190,7 @@ type graveyardHandler struct { func (g graveyardHandler) handleKV(k, v []byte) error { o, err := graveFromKV(k, v) if err != nil { - return fmt.Errorf("could not parse grave: %w", err) + return fmt.Errorf("parse grave: %w", err) } return g.h(o) @@ -239,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address) func garbageFromKV(k []byte) (res GarbageObject, err error) { err = decodeAddressFromKey(&res.addr, k) if err != nil { - err = fmt.Errorf("could not parse address: %w", err) + err = fmt.Errorf("parse address: %w", err) } return @@ -255,46 +256,58 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) { return } -// DropGraves deletes tombstoned objects from the +// InhumeTombstones deletes tombstoned objects from the // graveyard bucket. // // Returns any error appeared during deletion process. -func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error { +func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) { var ( startedAt = time.Now() success = false ) defer func() { - db.metrics.AddMethodDuration("DropGraves", time.Since(startedAt), success) + db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success) }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.DropGraves") + _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones") defer span.End() db.modeMtx.RLock() defer db.modeMtx.RUnlock() if db.mode.NoMetabase() { - return ErrDegradedMode + return InhumeRes{}, ErrDegradedMode } else if db.mode.ReadOnly() { - return ErrReadOnlyMode + return InhumeRes{}, ErrReadOnlyMode } buf := make([]byte, addressKeySize) + prm := InhumePrm{forceRemoval: true} + currEpoch := db.epochState.CurrentEpoch() - return db.boltDB.Update(func(tx *bbolt.Tx) error { - bkt := tx.Bucket(graveyardBucketName) - if bkt == nil { - return nil + var res InhumeRes + + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { + res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)} + + garbageBKT := tx.Bucket(garbageBucketName) + graveyardBKT := tx.Bucket(graveyardBucketName) + + bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm) + if err != nil { + return err } - for _, ts := range tss { - err := bkt.Delete(addressKey(ts.Address(), buf)) - if err != nil { + for i := range tss { + if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil { + return err + } + if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil { return err } } return nil }) + return res, err } diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go index 75c7e2852..ebadecc04 100644 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ b/pkg/local_object_storage/metabase/graveyard_test.go @@ -7,6 +7,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" @@ -14,7 +17,7 @@ import ( func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() var counter int var iterGravePRM meta.GraveyardIterationPrm @@ -41,7 +44,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) { func TestDB_Iterate_OffsetNotFound(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() obj1 := testutil.GenerateObject() obj2 := testutil.GenerateObject() @@ -112,13 +115,14 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) { func TestDB_IterateDeletedObjects(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() + cnr := cidtest.ID() // generate and put 4 objects - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() - obj3 := testutil.GenerateObject() - obj4 := testutil.GenerateObject() + obj1 := testutil.GenerateObjectWithCID(cnr) + obj2 := testutil.GenerateObjectWithCID(cnr) + obj3 := testutil.GenerateObjectWithCID(cnr) + obj4 := testutil.GenerateObjectWithCID(cnr) var err error @@ -138,6 +142,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) { // inhume with tombstone addrTombstone := oidtest.Address() + addrTombstone.SetContainer(cnr) inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) inhumePrm.SetTombstoneAddress(addrTombstone) @@ -199,13 +204,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) { func TestDB_IterateOverGraveyard_Offset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() + cnr := cidtest.ID() // generate and put 4 objects - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() - obj3 := testutil.GenerateObject() - obj4 := testutil.GenerateObject() + obj1 := testutil.GenerateObjectWithCID(cnr) + obj2 := testutil.GenerateObjectWithCID(cnr) + obj3 := testutil.GenerateObjectWithCID(cnr) + obj4 := testutil.GenerateObjectWithCID(cnr) var err error @@ -223,6 +229,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { // inhume with tombstone addrTombstone := oidtest.Address() + addrTombstone.SetContainer(cnr) var inhumePrm meta.InhumePrm inhumePrm.SetAddresses( @@ -298,7 +305,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { func TestDB_IterateOverGarbage_Offset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // generate and put 4 objects obj1 := testutil.GenerateObject() @@ -388,13 +395,14 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) { require.False(t, iWasCalled) } -func TestDB_DropGraves(t *testing.T) { +func TestDB_InhumeTombstones(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) 
}() + cnr := cidtest.ID() // generate and put 2 objects - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() + obj1 := testutil.GenerateObjectWithCID(cnr) + obj2 := testutil.GenerateObjectWithCID(cnr) var err error @@ -404,8 +412,20 @@ func TestDB_DropGraves(t *testing.T) { err = putBig(db, obj2) require.NoError(t, err) - // inhume with tombstone - addrTombstone := oidtest.Address() + id1, _ := obj1.ID() + id2, _ := obj2.ID() + ts := objectSDK.NewTombstone() + ts.SetMembers([]oid.ID{id1, id2}) + objTs := objectSDK.New() + objTs.SetContainerID(cnr) + objTs.SetType(objectSDK.TypeTombstone) + + data, _ := ts.Marshal() + objTs.SetPayload(data) + require.NoError(t, objectSDK.CalculateAndSetID(objTs)) + require.NoError(t, putBig(db, objTs)) + + addrTombstone := object.AddressOf(objTs) var inhumePrm meta.InhumePrm inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) @@ -428,8 +448,11 @@ func TestDB_DropGraves(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, counter) - err = db.DropGraves(context.Background(), buriedTS) + res, err := db.InhumeTombstones(context.Background(), buriedTS) require.NoError(t, err) + require.EqualValues(t, 1, res.LogicInhumed()) + require.EqualValues(t, 0, res.UserInhumed()) + require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID()) counter = 0 iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error { diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index c265fb217..76018fb61 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -143,6 +143,20 @@ func (p *InhumePrm) SetForceGCMark() { p.forceRemoval = true } +func (p *InhumePrm) validate() error { + if p == nil { + return nil + } + if p.tomb != nil { + for _, addr := range p.target { + if addr.Container() != p.tomb.Container() { + return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb) + } + } + } + return nil +} + var errBreakBucketForEach = errors.New("bucket ForEach break") // ErrLockObjectRemoval is returned when inhume operation is being @@ -171,6 +185,10 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() + if err := prm.validate(); err != nil { + return InhumeRes{}, err + } + if db.mode.NoMetabase() { return InhumeRes{}, ErrDegradedMode } else if db.mode.ReadOnly() { @@ -181,13 +199,13 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { inhumedByCnrID: make(map[cid.ID]ObjectCounters), } currEpoch := db.epochState.CurrentEpoch() - err := db.boltDB.Update(func(tx *bbolt.Tx) error { + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { return db.inhumeTx(tx, currEpoch, prm, &res) }) success = err == nil if success { for _, addr := range prm.target { - storagelog.Write(db.log, + storagelog.Write(ctx, db.log, storagelog.AddressField(addr), storagelog.OpField("metabase INHUME")) } @@ -199,88 +217,96 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes garbageBKT := tx.Bucket(garbageBucketName) graveyardBKT := tx.Bucket(graveyardBucketName) - bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm) + bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm) if err != nil { return err } buf := make([]byte, addressKeySize) for i := range prm.target { - id := prm.target[i].Object() - cnr := 
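`InhumePrm.validate()`, added above, enforces that a tombstone and every address it buries share one container, which is why the updated tests now derive tombstone addresses with `SetContainer`. A hedged usage sketch of the invariant; the wrapper function is hypothetical, the `meta` calls mirror the tests:

```go
package example

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// inhumeWithTombstone shows the invariant validate() now enforces:
// the tombstone must live in the same container as every target
// address, otherwise Inhume fails before touching the database.
func inhumeWithTombstone(ctx context.Context, db *meta.DB, tomb oid.Address, targets ...oid.Address) error {
	for _, t := range targets {
		if t.Container() != tomb.Container() {
			return fmt.Errorf("object %s and tombstone %s have different container ID", t, tomb)
		}
	}
	var prm meta.InhumePrm
	prm.SetTombstoneAddress(tomb)
	prm.SetAddresses(targets...)
	_, err := db.Inhume(ctx, prm)
	return err
}
```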
prm.target[i].Container() - - // prevent locked objects to be inhumed - if !prm.forceRemoval && objectLocked(tx, cnr, id) { - return new(apistatus.ObjectLocked) - } - - var lockWasChecked bool - - // prevent lock objects to be inhumed - // if `Inhume` was called not with the - // `WithForceGCMark` option - if !prm.forceRemoval { - if isLockObject(tx, cnr, id) { - return ErrLockObjectRemoval - } - - lockWasChecked = true - } - - obj, err := db.get(tx, prm.target[i], buf, false, true, epoch) - targetKey := addressKey(prm.target[i], buf) - var ecErr *objectSDK.ECInfoError - if err == nil { - err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res) - if err != nil { - return err - } - } else if errors.As(err, &ecErr) { - err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value, targetKey) - if err != nil { - return err - } - } - - if prm.tomb != nil { - var isTomb bool - isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey) - if err != nil { - return err - } - - if isTomb { - continue - } - } - - // consider checking if target is already in graveyard? - err = bkt.Put(targetKey, value) - if err != nil { + if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil { return err } - - if prm.lockObjectHandling { - // do not perform lock check if - // it was already called - if lockWasChecked { - // inhumed object is not of - // the LOCK type - continue - } - - if isLockObject(tx, cnr, id) { - res.deletedLockObj = append(res.deletedLockObj, prm.target[i]) - } - } } return db.applyInhumeResToCounters(tx, res) } +func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error { + id := addr.Object() + cnr := addr.Container() + tx := bkt.Tx() + + // prevent locked objects to be inhumed + if !prm.forceRemoval && objectLocked(tx, cnr, id) { + return new(apistatus.ObjectLocked) + } + + var lockWasChecked bool + + // prevent lock objects to be inhumed + // if `Inhume` was called not with the + // `WithForceGCMark` option + if !prm.forceRemoval { + if isLockObject(tx, cnr, id) { + return ErrLockObjectRemoval + } + + lockWasChecked = true + } + + obj, err := db.get(tx, addr, buf, false, true, epoch) + targetKey := addressKey(addr, buf) + var ecErr *objectSDK.ECInfoError + if err == nil { + err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res) + if err != nil { + return err + } + } else if errors.As(err, &ecErr) { + err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value) + if err != nil { + return err + } + } + + if prm.tomb != nil { + var isTomb bool + isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey) + if err != nil { + return err + } + + if isTomb { + return nil + } + } + + // consider checking if target is already in graveyard? 
+ err = bkt.Put(targetKey, value) + if err != nil { + return err + } + + if prm.lockObjectHandling { + // do not perform lock check if + // it was already called + if lockWasChecked { + // inhumed object is not of + // the LOCK type + return nil + } + + if isLockObject(tx, cnr, id) { + res.deletedLockObj = append(res.deletedLockObj, addr) + } + } + return nil +} + func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes, garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket, - ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte, targetKey []byte, + ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte, ) error { for _, chunk := range ecInfo.Chunks { chunkBuf := make([]byte, addressKeySize) @@ -296,11 +322,11 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I if err != nil { return err } - err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, chunkObj, res) + chunkKey := addressKey(chunkAddr, chunkBuf) + err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, chunkKey, cnr, chunkObj, res) if err != nil { return err } - chunkKey := addressKey(chunkAddr, chunkBuf) if tomb != nil { _, err = db.markAsGC(graveyardBKT, garbageBKT, chunkKey) if err != nil { @@ -316,10 +342,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I } func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error { - if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil { + if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil { return err } - if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil { + if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil { return err } @@ -336,7 +362,7 @@ func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error { // 1. tombstone address if Inhume was called with // a Tombstone // 2. 
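applyInhumeResToCounters now calls decShardObjectCounter, whose body is not part of this patch. A hedged sketch of how such a persisted uint64 counter decrement typically looks in bbolt; the bucket name, key, and saturation behaviour below are assumptions for illustration, not the real helper:

package main

import (
	"encoding/binary"
	"log"

	"go.etcd.io/bbolt"
)

// decCounter reads the current 8-byte value, subtracts with
// saturation, and writes it back inside the same transaction.
func decCounter(tx *bbolt.Tx, key []byte, delta uint64) error {
	bkt, err := tx.CreateBucketIfNotExists([]byte("ShardInfo"))
	if err != nil {
		return err
	}
	var cur uint64
	if raw := bkt.Get(key); len(raw) == 8 {
		cur = binary.LittleEndian.Uint64(raw)
	}
	if delta > cur {
		delta = cur // never underflow below zero
	}
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, cur-delta)
	return bkt.Put(key, buf)
}

func main() {
	db, err := bbolt.Open("/tmp/counters.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Batch(func(tx *bbolt.Tx) error {
		return decCounter(tx, []byte("logic"), 2)
	}); err != nil {
		log.Fatal(err)
	}
}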
zeroValue if Inhume was called with a GC mark -func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) { +func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) { if prm.tomb != nil { targetBucket = graveyardBKT tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize)) @@ -347,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck if data != nil { err := targetBucket.Delete(tombKey) if err != nil { - return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err) + return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err) } } @@ -359,11 +385,8 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck return targetBucket, value, nil } -func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) { - targetIsTomb, err := isTomb(graveyardBKT, key) - if err != nil { - return false, err - } +func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) { + targetIsTomb := isTomb(graveyardBKT, addressKey) // do not add grave if target is a tombstone if targetIsTomb { @@ -372,7 +395,7 @@ func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool // if tombstone appears object must be // additionally marked with GC - return false, garbageBKT.Put(key, zeroValue) + return false, garbageBKT.Put(addressKey, zeroValue) } func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error { @@ -392,25 +415,21 @@ func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Buc return nil } -func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) { +func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool { targetIsTomb := false // iterate over graveyard and check if target address // is the address of tombstone in graveyard. - err := graveyardBucket.ForEach(func(_, v []byte) error { + // tombstone must have the same container ID as key. 
+ c := graveyardBucket.Cursor() + containerPrefix := addressKey[:cidSize] + for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() { // check if graveyard has record with key corresponding // to tombstone address (at least one) - targetIsTomb = bytes.Equal(v, key) - + targetIsTomb = bytes.Equal(v, addressKey) if targetIsTomb { - // break bucket iterator - return errBreakBucketForEach + break } - - return nil - }) - if err != nil && !errors.Is(err, errBreakBucketForEach) { - return false, err } - return targetIsTomb, nil + return targetIsTomb } diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go new file mode 100644 index 000000000..180713287 --- /dev/null +++ b/pkg/local_object_storage/metabase/inhume_ec_test.go @@ -0,0 +1,114 @@ +package meta + +import ( + "context" + "path/filepath" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + "github.com/stretchr/testify/require" +) + +func TestInhumeECObject(t *testing.T) { + t.Parallel() + + db := New( + WithPath(filepath.Join(t.TempDir(), "metabase")), + WithPermissions(0o600), + WithEpochState(epochState{uint64(12)}), + ) + + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() + + cnr := cidtest.ID() + ecChunk := oidtest.ID() + ecChunk2 := oidtest.ID() + ecParent := oidtest.ID() + tombstoneID := oidtest.ID() + + chunkObj := testutil.GenerateObjectWithCID(cnr) + chunkObj.SetID(ecChunk) + chunkObj.SetPayload([]byte{0, 1, 2, 3, 4}) + chunkObj.SetPayloadSize(uint64(5)) + chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0)) + + chunkObj2 := testutil.GenerateObjectWithCID(cnr) + chunkObj2.SetID(ecChunk2) + chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) + chunkObj2.SetPayloadSize(uint64(10)) + chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 1, 3, []byte{}, 0)) + + // put object with EC + + var prm PutPrm + prm.SetObject(chunkObj) + prm.SetStorageID([]byte("0/0")) + _, err := db.Put(context.Background(), prm) + require.NoError(t, err) + + prm.SetObject(chunkObj2) + _, err = db.Put(context.Background(), prm) + require.NoError(t, err) + + var ecChunkAddress oid.Address + ecChunkAddress.SetContainer(cnr) + ecChunkAddress.SetObject(ecChunk) + + var ecParentAddress oid.Address + ecParentAddress.SetContainer(cnr) + ecParentAddress.SetObject(ecParent) + + var chunkObjectAddress oid.Address + chunkObjectAddress.SetContainer(cnr) + chunkObjectAddress.SetObject(ecChunk) + + var getPrm GetPrm + + getPrm.SetAddress(ecChunkAddress) + _, err = db.Get(context.Background(), getPrm) + require.NoError(t, err) + + var ecInfoError *objectSDK.ECInfoError + getPrm.SetAddress(ecParentAddress) + _, err = db.Get(context.Background(), getPrm) + require.ErrorAs(t, err, &ecInfoError) + require.True(t, len(ecInfoError.ECInfo().Chunks) == 2 && + 
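The rewritten isTomb replaces a full-bucket ForEach with a cursor seek bounded by the container prefix, which is complete now that validate() guarantees a tombstone and its victims share a container. A generic sketch of the access pattern; bucket name and key layout are illustrative:

package main

import (
	"bytes"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

// hasValueWithPrefix seeks to the prefix and stops as soon as keys
// leave it, instead of scanning the whole bucket.
func hasValueWithPrefix(bkt *bbolt.Bucket, prefix, want []byte) bool {
	c := bkt.Cursor()
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		if bytes.Equal(v, want) {
			return true
		}
	}
	return false
}

func main() {
	db, err := bbolt.Open("/tmp/graveyard.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	_ = db.View(func(tx *bbolt.Tx) error {
		if bkt := tx.Bucket([]byte("Graveyard")); bkt != nil {
			fmt.Println(hasValueWithPrefix(bkt, []byte("cnr-1/"), []byte("cnr-1/tomb")))
		}
		return nil
	})
}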
ecInfoError.ECInfo().Chunks[0].Index == 0 && + ecInfoError.ECInfo().Chunks[0].Total == 3) + + // inhume Chunk + var inhumePrm InhumePrm + var tombAddress oid.Address + inhumePrm.SetAddresses(chunkObjectAddress) + res, err := db.Inhume(context.Background(), inhumePrm) + require.NoError(t, err) + require.True(t, len(res.deletionDetails) == 1) + require.True(t, res.deletionDetails[0].Size == 5) + + // inhume EC parent (like Delete does) + tombAddress.SetContainer(cnr) + tombAddress.SetObject(tombstoneID) + inhumePrm.SetAddresses(ecParentAddress) + inhumePrm.SetTombstoneAddress(tombAddress) + res, err = db.Inhume(context.Background(), inhumePrm) + require.NoError(t, err) + // Previously deleted chunk shouldn't be in the details, because it is marked as garbage + require.True(t, len(res.deletionDetails) == 1) + require.True(t, res.deletionDetails[0].Size == 10) + + getPrm.SetAddress(ecParentAddress) + _, err = db.Get(context.Background(), getPrm) + require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) + + getPrm.SetAddress(ecChunkAddress) + _, err = db.Get(context.Background(), getPrm) + require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) +} diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go index 163fbec2a..786d10396 100644 --- a/pkg/local_object_storage/metabase/inhume_test.go +++ b/pkg/local_object_storage/metabase/inhume_test.go @@ -9,6 +9,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" @@ -16,17 +17,15 @@ import ( func TestDB_Inhume(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw := testutil.GenerateObject() testutil.AddAttribute(raw, "foo", "bar") - tombstoneID := oidtest.Address() - err := putBig(db, raw) require.NoError(t, err) - err = metaInhume(db, object.AddressOf(raw), tombstoneID) + err = metaInhume(db, object.AddressOf(raw), oidtest.ID()) require.NoError(t, err) _, err = metaExists(db, object.AddressOf(raw)) @@ -38,18 +37,25 @@ func TestDB_Inhume(t *testing.T) { func TestInhumeTombOnTomb(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() var ( err error + cnr = cidtest.ID() addr1 = oidtest.Address() addr2 = oidtest.Address() addr3 = oidtest.Address() + addr4 = oidtest.Address() inhumePrm meta.InhumePrm existsPrm meta.ExistsPrm ) + addr1.SetContainer(cnr) + addr2.SetContainer(cnr) + addr3.SetContainer(cnr) + addr4.SetContainer(cnr) + inhumePrm.SetAddresses(addr1) inhumePrm.SetTombstoneAddress(addr2) @@ -84,7 +90,7 @@ func TestInhumeTombOnTomb(t *testing.T) { require.True(t, client.IsErrObjectAlreadyRemoved(err)) inhumePrm.SetAddresses(addr1) - inhumePrm.SetTombstoneAddress(oidtest.Address()) + inhumePrm.SetTombstoneAddress(addr4) // try to inhume addr1 (which is already a tombstone in graveyard) _, err = db.Inhume(context.Background(), inhumePrm) @@ -101,7 +107,7 @@ func TestInhumeTombOnTomb(t *testing.T) { func TestInhumeLocked(t *testing.T) { db := newDB(t) - defer func() { 
require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() locked := oidtest.Address() @@ -117,10 +123,13 @@ func TestInhumeLocked(t *testing.T) { require.ErrorAs(t, err, &e) } -func metaInhume(db *meta.DB, target, tomb oid.Address) error { +func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error { var inhumePrm meta.InhumePrm inhumePrm.SetAddresses(target) - inhumePrm.SetTombstoneAddress(tomb) + var tombAddr oid.Address + tombAddr.SetContainer(target.Container()) + tombAddr.SetObject(tomb) + inhumePrm.SetTombstoneAddress(tombAddr) _, err := db.Inhume(context.Background(), inhumePrm) return err diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index 7b60b7d50..9cccd7dad 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -3,17 +3,14 @@ package meta import ( "context" "errors" - "fmt" "strconv" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" "go.opentelemetry.io/otel/attribute" @@ -79,127 +76,37 @@ func (db *DB) IterateExpired(ctx context.Context, epoch uint64, h ExpiredObjectH } func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler) error { - err := tx.ForEach(func(name []byte, b *bbolt.Bucket) error { - cidBytes := cidFromAttributeBucket(name, objectV2.SysAttributeExpEpoch) - if cidBytes == nil { - cidBytes = cidFromAttributeBucket(name, objectV2.SysAttributeExpEpochNeoFS) - if cidBytes == nil { - return nil - } - } - - var cnrID cid.ID - err := cnrID.Decode(cidBytes) + b := tx.Bucket(expEpochToObjectBucketName) + c := b.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + expiresAfter, cnr, obj, err := parseExpirationEpochKey(k) if err != nil { - return fmt.Errorf("could not parse container ID of expired bucket: %w", err) - } - - return b.ForEachBucket(func(expKey []byte) error { - bktExpired := b.Bucket(expKey) - expiresAfter, err := strconv.ParseUint(string(expKey), 10, 64) - if err != nil { - return fmt.Errorf("could not parse expiration epoch: %w", err) - } else if expiresAfter >= epoch { - return nil - } - - return bktExpired.ForEach(func(idKey, _ []byte) error { - var id oid.ID - - err = id.Decode(idKey) - if err != nil { - return fmt.Errorf("could not parse ID of expired object: %w", err) - } - - // Ignore locked objects. - // - // To slightly optimize performance we can check only REGULAR objects - // (only they can be locked), but it's more reliable. - if objectLocked(tx, cnrID, id) { - return nil - } - - var addr oid.Address - addr.SetContainer(cnrID) - addr.SetObject(id) - - return h(&ExpiredObject{ - typ: firstIrregularObjectType(tx, cnrID, idKey), - addr: addr, - }) - }) - }) - }) - - if errors.Is(err, ErrInterruptIterator) { - err = nil - } - - return err -} - -// IterateCoveredByTombstones iterates over all objects in DB which are covered -// by tombstone with string address from tss. Locked objects are not included -// (do not confuse with objects of type LOCK). 
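metaInhume now takes only a tombstone ID and derives the full address from the target's container, which is exactly what the new InhumePrm.validate() requires. A small sketch of that construction using the same SDK test helpers the tests import:

package main

import (
	"fmt"

	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
)

func main() {
	var target oid.Address
	target.SetContainer(cidtest.ID())
	target.SetObject(oidtest.ID())

	// The tombstone must share the victim's container or
	// InhumePrm.validate() rejects the call, so only the object ID
	// is fresh; the container is copied from the target.
	var tomb oid.Address
	tomb.SetContainer(target.Container())
	tomb.SetObject(oidtest.ID())

	fmt.Println(tomb.Container().EncodeToString() == target.Container().EncodeToString())
}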
-// -// If h returns ErrInterruptIterator, nil returns immediately. -// Returns other errors of h directly. -// -// Does not modify tss. -func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - return db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateCoveredByTombstones(tx, tss, h) - }) -} - -func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error { - bktGraveyard := tx.Bucket(graveyardBucketName) - - err := bktGraveyard.ForEach(func(k, v []byte) error { - var addr oid.Address - if err := decodeAddressFromKey(&addr, v); err != nil { return err } - if _, ok := tss[addr.EncodeToString()]; ok { - var addr oid.Address - - err := decodeAddressFromKey(&addr, k) - if err != nil { - return fmt.Errorf("could not parse address of the object under tombstone: %w", err) - } - - if objectLocked(tx, addr.Container(), addr.Object()) { - return nil - } - - return h(addr) + // bucket keys ordered by epoch, no need to continue lookup + if expiresAfter >= epoch { + return nil } - - return nil - }) - - if errors.Is(err, ErrInterruptIterator) { - err = nil + if objectLocked(tx, cnr, obj) { + continue + } + var addr oid.Address + addr.SetContainer(cnr) + addr.SetObject(obj) + objKey := objectKey(addr.Object(), make([]byte, objectKeySize)) + err = h(&ExpiredObject{ + typ: firstIrregularObjectType(tx, cnr, objKey), + addr: addr, + }) + if err == nil { + continue + } + if errors.Is(err, ErrInterruptIterator) { + return nil + } + return err } - - return err + return nil } func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error { diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 54d56d923..4c9579965 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -5,10 +5,10 @@ import ( "strconv" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -17,7 +17,7 @@ import ( func TestDB_IterateExpired(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const epoch = 13 @@ -66,60 +66,3 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt return object2.AddressOf(obj) } - -func TestDB_IterateCoveredByTombstones(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() - - ts := oidtest.Address() - protected1 := oidtest.Address() - protected2 := 
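The early return on expiresAfter >= epoch in the new iterateExpired is only sound because bbolt iterates keys in lexicographic byte order and the epoch is encoded so that byte order matches numeric order. A sketch of why a big-endian prefix gives that property; the (epoch|container|object) layout mirrors parseExpirationEpochKey in spirit, with toy sizes:

package main

import (
	"encoding/binary"
	"fmt"
)

// expirationKey puts the epoch big-endian at the front of the key so
// that lexicographic order over bytes equals numeric order over
// epochs, letting a scan stop at the first non-expired key.
func expirationKey(epoch uint64, cnr, obj string) []byte {
	k := binary.BigEndian.AppendUint64(nil, epoch)
	k = append(k, cnr...)
	return append(k, obj...)
}

func main() {
	a := expirationKey(99, "cnr", "obj")
	b := expirationKey(100, "cnr", "obj")
	fmt.Println(string(a) < string(b)) // true: expires-sooner sorts first
}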
oidtest.Address() - protectedLocked := oidtest.Address() - garbage := oidtest.Address() - - var prm meta.InhumePrm - var err error - - prm.SetAddresses(protected1, protected2, protectedLocked) - prm.SetTombstoneAddress(ts) - - _, err = db.Inhume(context.Background(), prm) - require.NoError(t, err) - - prm.SetAddresses(garbage) - prm.SetGCMark() - - _, err = db.Inhume(context.Background(), prm) - require.NoError(t, err) - - var handled []oid.Address - - tss := map[string]oid.Address{ - ts.EncodeToString(): ts, - } - - err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error { - handled = append(handled, addr) - return nil - }) - require.NoError(t, err) - - require.Len(t, handled, 3) - require.Contains(t, handled, protected1) - require.Contains(t, handled, protected2) - require.Contains(t, handled, protectedLocked) - - err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()}) - require.NoError(t, err) - - handled = handled[:0] - - err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error { - handled = append(handled, addr) - return nil - }) - require.NoError(t, err) - - require.Len(t, handled, 2) - require.NotContains(t, handled, protectedLocked) -} diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index b4326a92c..2a0bd7f6a 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "time" @@ -61,8 +62,33 @@ func (l ListRes) Cursor() *Cursor { return l.cursor } +// IterateOverContainersPrm contains parameters for IterateOverContainers operation. +type IterateOverContainersPrm struct { + // Handler function executed upon containers in db. + Handler func(context.Context, objectSDK.Type, cid.ID) error +} + +// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. +type IterateOverObjectsInContainerPrm struct { + // ObjectType type of objects to iterate over. + ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. + ContainerID cid.ID + // Handler function executed upon objects in db. + Handler func(context.Context, *objectcore.Info) error +} + +// CountAliveObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. +type CountAliveObjectsInContainerPrm struct { + // ObjectType type of objects to iterate over. + ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. + ContainerID cid.ID +} + // ListWithCursor lists physical objects available in metabase starting from -// cursor. Includes objects of all types. Does not include inhumed objects. +// cursor. Includes objects of all types. Does not include inhumed and expired +// objects. // Use cursor value from response for consecutive requests. 
// // Returns ErrEndOfListing if there are no more objects to return or count @@ -113,11 +139,12 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, var containerID cid.ID var offset []byte - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + bc := newBucketCache() rawAddr := make([]byte, cidSize, addressKeySize) + currEpoch := db.epochState.CurrentEpoch() + loop: for ; name != nil; name, _ = c.Next() { cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name) @@ -141,8 +168,8 @@ loop: bkt := tx.Bucket(name) if bkt != nil { copy(rawAddr, cidRaw) - result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID, - result, count, cursor, threshold) + result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID, + result, count, cursor, threshold, currEpoch) if err != nil { return nil, nil, err } @@ -160,8 +187,7 @@ loop: if offset != nil { // new slice is much faster but less memory efficient // we need to copy, because offset exists during bbolt tx - cursor.inBucketOffset = make([]byte, len(offset)) - copy(cursor.inBucketOffset, offset) + cursor.inBucketOffset = bytes.Clone(offset) } if len(result) == 0 { @@ -170,29 +196,29 @@ loop: // new slice is much faster but less memory efficient // we need to copy, because bucketName exists during bbolt tx - cursor.bucketName = make([]byte, len(bucketName)) - copy(cursor.bucketName, bucketName) + cursor.bucketName = bytes.Clone(bucketName) return result, cursor, nil } // selectNFromBucket similar to selectAllFromBucket but uses cursor to find // object to start selecting from. Ignores inhumed objects. -func selectNFromBucket(bkt *bbolt.Bucket, // main bucket +func selectNFromBucket( + bc *bucketCache, + bkt *bbolt.Bucket, // main bucket objType objectSDK.Type, // type of the objects stored in the main bucket - graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets cidRaw []byte, // container ID prefix, optimization cnt cid.ID, // container ID to []objectcore.Info, // listing result limit int, // stop listing at `limit` items in result cursor *Cursor, // start from cursor object threshold bool, // ignore cursor and start immediately + currEpoch uint64, ) ([]objectcore.Info, []byte, *Cursor, error) { if cursor == nil { cursor = new(Cursor) } - count := len(to) c := bkt.Cursor() k, v := c.First() @@ -204,7 +230,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } for ; k != nil; k, v = c.Next() { - if count >= limit { + if len(to) >= limit { break } @@ -214,17 +240,25 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } offset = k + graveyardBkt := getGraveyardBucket(bc, bkt.Tx()) + garbageBkt := getGarbageBucket(bc, bkt.Tx()) if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { continue } + var o objectSDK.Object + if err := o.Unmarshal(v); err != nil { + return nil, nil, nil, err + } + + expEpoch, hasExpEpoch := hasExpirationEpoch(&o) + if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) { + continue + } + var isLinkingObj bool var ecInfo *objectcore.ECInfo if objType == objectSDK.TypeRegular { - var o objectSDK.Object - if err := o.Unmarshal(v); err != nil { - return nil, nil, nil, err - } isLinkingObj = isLinkObject(&o) ecHeader := o.ECHeader() if ecHeader != nil { @@ -240,7 +274,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket a.SetContainer(cnt) a.SetObject(obj) to = append(to, 
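The bytes.Clone substitutions in listWithCursor are behaviour-preserving replacements for the removed make+copy pairs; the copy itself is mandatory because slices handed out by bbolt are only valid while the transaction is open. A tiny demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// bytes.Clone (Go 1.20+) replaces the make+copy pair and also
	// maps nil to nil, which the old code never had to special-case.
	var offset []byte // e.g. a key handed out by a bbolt cursor
	saved := bytes.Clone(offset)
	fmt.Println(saved == nil) // true: nil stays nil

	offset = []byte("key-7")
	saved = bytes.Clone(offset)
	offset[0] = 'X'
	fmt.Println(string(saved)) // "key-7": unaffected by later writes
}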
objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}) - count++ } return to, offset, cursor, nil @@ -259,3 +292,211 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte) return rawID, name[0] } + +// IterateOverContainers lists physical containers available in metabase starting from first. +func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + db.modeMtx.RLock() + defer db.modeMtx.RUnlock() + + if db.mode.NoMetabase() { + return ErrDegradedMode + } + + err := db.boltDB.View(func(tx *bbolt.Tx) error { + return db.iterateOverContainers(ctx, tx, prm) + }) + success = err == nil + return metaerr.Wrap(err) +} + +func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error { + var containerID cid.ID + for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} { + c := tx.Cursor() + for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() { + cidRaw, _ := parseContainerIDWithPrefix(&containerID, name) + if cidRaw == nil { + continue + } + var cnt cid.ID + copy(cnt[:], containerID[:]) + var objType objectSDK.Type + switch prefix[0] { + case primaryPrefix: + objType = objectSDK.TypeRegular + case lockersPrefix: + objType = objectSDK.TypeLock + case tombstonePrefix: + objType = objectSDK.TypeTombstone + default: + continue + } + err := prm.Handler(ctx, objType, cnt) + if err != nil { + return err + } + } + } + + return nil +} + +// IterateOverObjectsInContainer iterate over physical objects available in metabase starting from first. +func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + db.modeMtx.RLock() + defer db.modeMtx.RUnlock() + + if db.mode.NoMetabase() { + return ErrDegradedMode + } + + err := db.boltDB.View(func(tx *bbolt.Tx) error { + return db.iterateOverObjectsInContainer(ctx, tx, prm) + }) + success = err == nil + return metaerr.Wrap(err) +} + +func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error { + var prefix byte + switch prm.ObjectType { + case objectSDK.TypeRegular: + prefix = primaryPrefix + case objectSDK.TypeLock: + prefix = lockersPrefix + case objectSDK.TypeTombstone: + prefix = tombstonePrefix + default: + return nil + } + bucketName := []byte{prefix} + bucketName = append(bucketName, prm.ContainerID[:]...) 
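A caller-side sketch of the new container iteration API, assuming an opened *meta.DB; the handler signature comes from IterateOverContainersPrm above, and the handler fires once per (object type, container) pair found under the primary, locker and tombstone prefixes:

package example

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// listContainers prints every container that holds at least one
// regular, lock, or tombstone object.
func listContainers(ctx context.Context, db *meta.DB) error {
	var prm meta.IterateOverContainersPrm
	prm.Handler = func(_ context.Context, typ objectSDK.Type, id cid.ID) error {
		fmt.Println(typ, id)
		return nil
	}
	return db.IterateOverContainers(ctx, prm)
}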
+ + bkt := tx.Bucket(bucketName) + if bkt == nil { + return nil + } + graveyardBkt := tx.Bucket(graveyardBucketName) + garbageBkt := tx.Bucket(garbageBucketName) + c := bkt.Cursor() + k, v := c.First() + + for ; k != nil; k, v = c.Next() { + var obj oid.ID + if err := obj.Decode(k); err != nil { + break + } + + if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 { + continue + } + + var isLinkingObj bool + var ecInfo *objectcore.ECInfo + if prm.ObjectType == objectSDK.TypeRegular { + var o objectSDK.Object + if err := o.Unmarshal(v); err != nil { + return err + } + isLinkingObj = isLinkObject(&o) + ecHeader := o.ECHeader() + if ecHeader != nil { + ecInfo = &objectcore.ECInfo{ + ParentID: ecHeader.Parent(), + Index: ecHeader.Index(), + Total: ecHeader.Total(), + } + } + } + + var a oid.Address + a.SetContainer(prm.ContainerID) + a.SetObject(obj) + objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo} + err := prm.Handler(ctx, &objInfo) + if err != nil { + return err + } + } + return nil +} + +// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage. +func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket") + defer span.End() + + db.modeMtx.RLock() + defer db.modeMtx.RUnlock() + + if db.mode.NoMetabase() { + return 0, ErrDegradedMode + } + + var prefix byte + switch prm.ObjectType { + case objectSDK.TypeRegular: + prefix = primaryPrefix + case objectSDK.TypeLock: + prefix = lockersPrefix + case objectSDK.TypeTombstone: + prefix = tombstonePrefix + default: + return 0, nil + } + bucketName := []byte{prefix} + bucketName = append(bucketName, prm.ContainerID[:]...) 
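The matching caller-side sketch for per-container iteration and counting, under the same assumption of an opened *meta.DB; both calls skip objects already marked in the graveyard or garbage buckets:

package example

import (
	"context"
	"fmt"

	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// tombstoneSurvey walks one container's alive tombstones, then counts them.
func tombstoneSurvey(ctx context.Context, db *meta.DB, cnr cid.ID) (uint64, error) {
	var it meta.IterateOverObjectsInContainerPrm
	it.ObjectType = objectSDK.TypeTombstone
	it.ContainerID = cnr
	it.Handler = func(_ context.Context, info *objectcore.Info) error {
		fmt.Println("alive tombstone:", info.Address)
		return nil
	}
	if err := db.IterateOverObjectsInContainer(ctx, it); err != nil {
		return 0, err
	}

	var cnt meta.CountAliveObjectsInContainerPrm
	cnt.ObjectType = objectSDK.TypeTombstone
	cnt.ContainerID = cnr
	return db.CountAliveObjectsInContainer(ctx, cnt)
}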
+ var count uint64 + err := db.boltDB.View(func(tx *bbolt.Tx) error { + bkt := tx.Bucket(bucketName) + if bkt == nil { + return nil + } + graveyardBkt := tx.Bucket(graveyardBucketName) + garbageBkt := tx.Bucket(garbageBucketName) + c := bkt.Cursor() + k, _ := c.First() + for ; k != nil; k, _ = c.Next() { + if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 { + continue + } + count++ + } + return nil + }) + success = err == nil + return count, metaerr.Wrap(err) +} diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index a92e2eff4..02985991c 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -3,13 +3,17 @@ package meta_test import ( "context" "errors" + "strconv" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" @@ -17,6 +21,8 @@ import ( func BenchmarkListWithCursor(b *testing.B) { db := listWithCursorPrepareDB(b) + defer func() { require.NoError(b, db.Close(context.Background())) }() + b.Run("1 item", func(b *testing.B) { benchmarkListWithCursor(b, db, 1) }) @@ -32,10 +38,9 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB { db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{ NoSync: true, })) // faster single-thread generation - defer func() { require.NoError(b, db.Close()) }() obj := testutil.GenerateObject() - for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes + for i := range 100_000 { // should be a multiple of all batch sizes obj.SetID(oidtest.ID()) if i%9 == 0 { // let's have 9 objects per container obj.SetContainerID(cidtest.ID()) @@ -51,10 +56,10 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { - if err != meta.ErrEndOfListing { + if !errors.Is(err, meta.ErrEndOfListing) { b.Fatalf("error: %v", err) } prm.SetCursor(nil) @@ -69,18 +74,20 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { func TestLisObjectsWithCursor(t *testing.T) { t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() - const ( + currEpoch = 100 + expEpoch = currEpoch - 1 containers = 5 - total = containers * 4 // regular + ts + child + lock + total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired ) + db := newDB(t, meta.WithEpochState(epochState{currEpoch})) + defer func() { require.NoError(t, db.Close(context.Background())) }() + expected := make([]object.Info, 0, total) // fill metabase with objects - for i := 0; i < containers; i++ { + for range containers { containerID := cidtest.ID() // add one regular object @@ -110,7 +117,7 @@ func TestLisObjectsWithCursor(t *testing.T) { err = 
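The loop rewrites in the tests lean on Go 1.22's range-over-int, which the toolchain bump in this patch makes available. A minimal demonstration:

package main

import "fmt"

func main() {
	// "for i := range 5" replaces "for i := 0; i < 5; i++",
	// and "for range b.N" drops the unused index entirely.
	sum := 0
	for i := range 5 {
		sum += i
	}
	fmt.Println(sum) // 10
}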
putBig(db, obj) require.NoError(t, err) ts := testutil.GenerateObjectWithCID(containerID) - err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts)) + err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object()) require.NoError(t, err) // add one child object (do not include parent into expected) @@ -125,6 +132,26 @@ func TestLisObjectsWithCursor(t *testing.T) { err = putBig(db, child) require.NoError(t, err) expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular}) + + // add expired object (do not include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + + // add non-expired object (include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) + + // add locked expired object (include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + objID := oidtest.ID() + obj.SetID(objID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID})) + expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) } t.Run("success with various count", func(t *testing.T) { @@ -140,7 +167,7 @@ func TestLisObjectsWithCursor(t *testing.T) { expectedIterations-- } - for i := 0; i < expectedIterations; i++ { + for range expectedIterations { res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor) require.NoError(t, err, "count:%d", countPerReq) got = append(got, res...) 
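The expired-object test cases hinge on the system expiration attribute. A sketch of roughly what testutil.AddAttribute does there, assuming the SDK's variadic SetAttributes; it attaches the attribute that both the new expiration index and the listing filter key off:

package example

import (
	"strconv"

	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// setExpiration tags an object with the epoch after which it is
// considered expired.
func setExpiration(obj *objectSDK.Object, epoch uint64) {
	var attr objectSDK.Attribute
	attr.SetKey(objectV2.SysAttributeExpEpoch)
	attr.SetValue(strconv.FormatUint(epoch, 10))
	obj.SetAttributes(append(obj.Attributes(), attr)...)
}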
@@ -162,14 +189,14 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const total = 5 expected := make(map[string]int, total) // fill metabase with objects - for i := 0; i < total; i++ { + for range total { obj := testutil.GenerateObject() err := putBig(db, obj) require.NoError(t, err) @@ -186,7 +213,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { } // add new objects - for i := 0; i < total; i++ { + for range total { obj := testutil.GenerateObject() err = putBig(db, obj) require.NoError(t, err) @@ -219,3 +246,59 @@ func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]objec r, err := db.ListWithCursor(context.Background(), listPrm) return r.AddressList(), r.Cursor(), err } + +func TestIterateOver(t *testing.T) { + t.Parallel() + + db := newDB(t) + defer func() { require.NoError(t, db.Close(context.Background())) }() + + const total uint64 = 5 + for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} { + var expected []*objectSDK.Object + // fill metabase with objects + cid := cidtest.ID() + for range total { + obj := testutil.GenerateObjectWithCID(cid) + obj.SetType(typ) + err := metaPut(db, obj, nil) + require.NoError(t, err) + expected = append(expected, obj) + } + + var metaIter meta.IterateOverObjectsInContainerPrm + var count uint64 + metaIter.Handler = func(context.Context, *object.Info) error { + count++ + return nil + } + metaIter.ContainerID = cid + metaIter.ObjectType = typ + err := db.IterateOverObjectsInContainer(context.Background(), metaIter) + require.NoError(t, err) + require.Equal(t, total, count) + + var metaCount meta.CountAliveObjectsInContainerPrm + metaCount.ContainerID = cid + metaCount.ObjectType = typ + res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount) + require.NoError(t, err) + require.Equal(t, res, total) + + err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1])) + require.NoError(t, err) + + res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount) + require.NoError(t, err) + require.Equal(t, uint64(3), res) + } + var count int + var metaPrm meta.IterateOverContainersPrm + metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error { + count++ + return nil + } + err := db.IterateOverContainers(context.Background(), metaPrm) + require.NoError(t, err) + require.Equal(t, 3, count) +} diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index 732ba426d..f4cb9e53b 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -4,8 +4,10 @@ import ( "bytes" "context" "fmt" + "slices" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -62,9 +64,7 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid. 
return ErrReadOnlyMode } - if len(locked) == 0 { - panic("empty locked list") - } + assert.False(len(locked) == 0, "empty locked list") err := db.lockInternal(locked, cnr, locker) success = err == nil @@ -78,7 +78,7 @@ func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error { } key := make([]byte, cidSize) - return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error { + return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error { if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular { return logicerr.Wrap(new(apistatus.LockNonRegularObject)) } @@ -143,7 +143,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { var unlockedObjects []oid.Address - if err := db.boltDB.Update(func(tx *bbolt.Tx) error { + if err := db.boltDB.Batch(func(tx *bbolt.Tx) error { for i := range lockers { unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object()) if err != nil { @@ -162,7 +162,11 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { // checks if specified object is locked in the specified container. func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - bucketLocked := tx.Bucket(bucketNameLocked) + return objectLockedWithCache(nil, tx, idCnr, idObj) +} + +func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { + bucketLocked := getLockedBucket(bc, tx) if bucketLocked != nil { key := make([]byte, cidSize) idCnr.Encode(key) @@ -176,7 +180,7 @@ func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { } // return `LOCK` id's if specified object is locked in the specified container. -func getLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) { +func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) { var lockers []oid.ID bucketLocked := tx.Bucket(bucketNameLocked) if bucketLocked != nil { @@ -250,7 +254,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres unlockedObjects = append(unlockedObjects, addr) } else { // exclude locker - keyLockers = append(keyLockers[:i], keyLockers[i+1:]...) + keyLockers = slices.Delete(keyLockers, i, i+1) v, err = encodeList(keyLockers) if err != nil { @@ -351,20 +355,20 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e return res, err } -// GetLocked return `LOCK` id's if provided object is locked by any `LOCK`. Not found +// GetLocks return `LOCK` id's if provided object is locked by any `LOCK`. Not found // object is considered as non-locked. // // Returns only non-logical errors related to underlying database. 
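The locker-exclusion change swaps the append(s[:i], s[i+1:]...) idiom for the stdlib helper. A tiny demonstration of the difference:

package main

import (
	"fmt"
	"slices"
)

func main() {
	// slices.Delete (Go 1.21+) removes s[i:i+1]; since Go 1.22 it
	// also zeroes the vacated tail elements, so slices of pointers
	// like the lockers list release what they referenced.
	lockers := []string{"lock-a", "lock-b", "lock-c"}
	i := 1 // index of the locker being excluded
	lockers = slices.Delete(lockers, i, i+1)
	fmt.Println(lockers) // [lock-a lock-c]
}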
-func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, err error) { +func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) { var ( startedAt = time.Now() success = false ) defer func() { - db.metrics.AddMethodDuration("GetLocked", time.Since(startedAt), success) + db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success) }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocked", + _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks", trace.WithAttributes( attribute.String("address", addr.EncodeToString()), )) @@ -377,7 +381,7 @@ func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, er return res, ErrDegradedMode } err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res, err = getLocked(tx, addr.Container(), addr.Object()) + res, err = getLocks(tx, addr.Container(), addr.Object()) return nil })) success = err == nil diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index 2d7bfc1cc..341ff9ad1 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -21,7 +21,7 @@ func TestDB_Lock(t *testing.T) { cnr := cidtest.ID() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() t.Run("empty locked list", func(t *testing.T) { require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) }) @@ -73,7 +73,9 @@ func TestDB_Lock(t *testing.T) { _, err := db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) - inhumePrm.SetTombstoneAddress(oidtest.Address()) + tombAddr := oidtest.Address() + tombAddr.SetContainer(objAddr.Container()) + inhumePrm.SetTombstoneAddress(tombAddr) _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) @@ -89,7 +91,9 @@ func TestDB_Lock(t *testing.T) { _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) - inhumePrm.SetTombstoneAddress(oidtest.Address()) + tombAddr = oidtest.Address() + tombAddr.SetContainer(objAddr.Container()) + inhumePrm.SetTombstoneAddress(tombAddr) _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) }) @@ -103,7 +107,7 @@ func TestDB_Lock(t *testing.T) { var objLockedErr *apistatus.ObjectLocked // try to inhume locked object using tombstone - err := metaInhume(db, objAddr, lockAddr) + err := metaInhume(db, objAddr, lockAddr.Object()) require.ErrorAs(t, err, &objLockedErr) // free locked object @@ -155,7 +159,7 @@ func TestDB_Lock(t *testing.T) { inhumePrm.SetGCMark() - for i := 0; i < objsNum; i++ { + for i := range objsNum { inhumePrm.SetAddresses(objectcore.AddressOf(objs[i])) res, err = db.Inhume(context.Background(), inhumePrm) @@ -183,7 +187,7 @@ func TestDB_Lock_Expired(t *testing.T) { es := &epochState{e: 123} db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // put an object addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124) @@ -205,7 +209,7 @@ func TestDB_IsLocked(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // existing and locked objs @@ -255,7 +259,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) 
([]*objectSDK lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs) lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs) - for i := 0; i < numOfLockedObjs; i++ { + for range numOfLockedObjs { obj := testutil.GenerateObjectWithCID(cnr) err := putBig(db, obj) require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go index 2032ed6b2..7edb96384 100644 --- a/pkg/local_object_storage/metabase/mode.go +++ b/pkg/local_object_storage/metabase/mode.go @@ -1,6 +1,7 @@ package meta import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -8,7 +9,7 @@ import ( // SetMode sets the metabase mode of operation. // If the mode assumes no operation metabase, the database is closed. -func (db *DB) SetMode(m mode.Mode) error { +func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { db.modeMtx.Lock() defer db.modeMtx.Unlock() @@ -17,20 +18,20 @@ func (db *DB) SetMode(m mode.Mode) error { } if !db.mode.NoMetabase() { - if err := db.Close(); err != nil { - return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) + if err := db.Close(ctx); err != nil { + return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } if m.NoMetabase() { db.boltDB = nil } else { - err := db.openDB(m) + err := db.openDB(ctx, m) if err == nil && !m.ReadOnly() { - err = db.Init() + err = db.Init(ctx) } if err != nil { - return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) + return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go index 1b9f60055..28b42283f 100644 --- a/pkg/local_object_storage/metabase/mode_test.go +++ b/pkg/local_object_storage/metabase/mode_test.go @@ -25,13 +25,13 @@ func Test_Mode(t *testing.T) { require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly)) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close()) + require.NoError(t, bdb.Close(context.Background())) require.NoError(t, bdb.Open(context.Background(), mode.Degraded)) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close()) + require.NoError(t, bdb.Close(context.Background())) } diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index f351cb485..5e1bbfe9e 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" gio "io" + "strconv" "time" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -13,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -33,6 +35,8 @@ type PutPrm struct { obj *objectSDK.Object id []byte + + indexAttributes bool } // PutRes groups the resulting values of Put 
operation. @@ -50,10 +54,13 @@ func (p *PutPrm) SetStorageID(id []byte) { p.id = id } +func (p *PutPrm) SetIndexAttributes(v bool) { + p.indexAttributes = v +} + var ( - ErrUnknownObjectType = errors.New("unknown object type") - ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it") - ErrIncorrectRootObject = errors.New("invalid root object") + ErrUnknownObjectType = errors.New("unknown object type") + ErrIncorrectRootObject = errors.New("invalid root object") ) // Put saves object header in metabase. Object payload expected to be cut. @@ -88,12 +95,12 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) { err = db.boltDB.Batch(func(tx *bbolt.Tx) error { var e error - res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch) + res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes) return e }) if err == nil { success = true - storagelog.Write(db.log, + storagelog.Write(ctx, db.log, storagelog.AddressField(objectCore.AddressOf(prm.obj)), storagelog.OpField("metabase PUT")) } @@ -106,15 +113,22 @@ func (db *DB) put(tx *bbolt.Tx, id []byte, si *objectSDK.SplitInfo, currEpoch uint64, + indexAttributes bool, ) (PutRes, error) { cnr, ok := obj.ContainerID() if !ok { return PutRes{}, errors.New("missing container in object") } + var ecParentAddress oid.Address + if ecHeader := obj.ECHeader(); ecHeader != nil { + ecParentAddress.SetContainer(cnr) + ecParentAddress.SetObject(ecHeader.Parent()) + } + isParent := si != nil - exists, _, err := db.exists(tx, objectCore.AddressOf(obj), oid.Address{}, currEpoch) + exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch) var splitInfoError *objectSDK.SplitInfoError if errors.As(err, &splitInfoError) { @@ -127,7 +141,7 @@ func (db *DB) put(tx *bbolt.Tx, return PutRes{}, db.updateObj(tx, obj, id, si, isParent) } - return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch) + return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes) } func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error { @@ -150,14 +164,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje return nil } -func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error { +func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error { if par := obj.Parent(); par != nil && !isParent { // limit depth by two parentSI, err := splitInfoFromObject(obj) if err != nil { return err } - _, err = db.put(tx, par, id, parentSI, currEpoch) + _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes) if err != nil { return err } @@ -165,17 +179,19 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o err := putUniqueIndexes(tx, obj, si, id) if err != nil { - return fmt.Errorf("can't put unique indexes: %w", err) + return fmt.Errorf("put unique indexes: %w", err) } err = updateListIndexes(tx, obj, putListIndexItem) if err != nil { - return fmt.Errorf("can't put list indexes: %w", err) + return fmt.Errorf("put list indexes: %w", err) } - err = updateFKBTIndexes(tx, obj, putFKBTIndexItem) - if err != nil { - return fmt.Errorf("can't put fake bucket tree indexes: %w", err) + if indexAttributes { + err = 
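A caller-side sketch of the new opt-in flag defined above, assuming an opened *meta.DB: attribute (FKBT) indexes are written only when the caller asks, and even then only for the allowlisted attributes introduced later in this patch:

package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// putIndexed stores an object header and requests attribute indexing.
func putIndexed(ctx context.Context, db *meta.DB, obj *objectSDK.Object) error {
	var prm meta.PutPrm
	prm.SetObject(obj)
	prm.SetIndexAttributes(true)
	_, err := db.Put(ctx, prm)
	return err
}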
updateFKBTIndexes(tx, obj, putFKBTIndexItem) + if err != nil { + return fmt.Errorf("put fake bucket tree indexes: %w", err) + } } // update container volume size estimation @@ -195,46 +211,17 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o return nil } -func putUniqueIndexes( - tx *bbolt.Tx, - obj *objectSDK.Object, - si *objectSDK.SplitInfo, - id []byte, -) error { +func putUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, si *objectSDK.SplitInfo, id []byte) error { isParent := si != nil addr := objectCore.AddressOf(obj) - cnr := addr.Container() objKey := objectKey(addr.Object(), make([]byte, objectKeySize)) bucketName := make([]byte, bucketKeySize) - // add value to primary unique bucket if !isParent { - switch obj.Type() { - case objectSDK.TypeRegular: - bucketName = primaryBucketName(cnr, bucketName) - case objectSDK.TypeTombstone: - bucketName = tombstoneBucketName(cnr, bucketName) - case objectSDK.TypeLock: - bucketName = bucketNameLockers(cnr, bucketName) - default: - return ErrUnknownObjectType - } - - rawObject, err := obj.CutPayload().Marshal() - if err != nil { - return fmt.Errorf("can't marshal object header: %w", err) - } - - err = putUniqueIndexItem(tx, namedBucketItem{ - name: bucketName, - key: objKey, - val: rawObject, - }) + err := putRawObjectData(tx, obj, bucketName, addr, objKey) if err != nil { return err } - - // index storageID if it is present if id != nil { if err = setStorageID(tx, objectCore.AddressOf(obj), id, false); err != nil { return err @@ -242,7 +229,60 @@ func putUniqueIndexes( } } - // index root object + if err := putExpirationEpoch(tx, obj, addr, objKey); err != nil { + return err + } + + return putSplitInfo(tx, obj, bucketName, addr, si, objKey) +} + +func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, objKey []byte) error { + switch obj.Type() { + case objectSDK.TypeRegular: + bucketName = primaryBucketName(addr.Container(), bucketName) + case objectSDK.TypeTombstone: + bucketName = tombstoneBucketName(addr.Container(), bucketName) + case objectSDK.TypeLock: + bucketName = bucketNameLockers(addr.Container(), bucketName) + default: + return ErrUnknownObjectType + } + rawObject, err := obj.CutPayload().Marshal() + if err != nil { + return fmt.Errorf("marshal object header: %w", err) + } + return putUniqueIndexItem(tx, namedBucketItem{ + name: bucketName, + key: objKey, + val: rawObject, + }) +} + +func putExpirationEpoch(tx *bbolt.Tx, obj *objectSDK.Object, addr oid.Address, objKey []byte) error { + if expEpoch, ok := hasExpirationEpoch(obj); ok { + err := putUniqueIndexItem(tx, namedBucketItem{ + name: expEpochToObjectBucketName, + key: expirationEpochKey(expEpoch, addr.Container(), addr.Object()), + val: zeroValue, + }) + if err != nil { + return err + } + val := make([]byte, epochSize) + binary.LittleEndian.PutUint64(val, expEpoch) + err = putUniqueIndexItem(tx, namedBucketItem{ + name: objectToExpirationEpochBucketName(addr.Container(), make([]byte, bucketKeySize)), + key: objKey, + val: val, + }) + if err != nil { + return err + } + } + return nil +} + +func putSplitInfo(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, si *objectSDK.SplitInfo, objKey []byte) error { if obj.Type() == objectSDK.TypeRegular && !obj.HasParent() { if ecHead := obj.ECHeader(); ecHead != nil { parentID := ecHead.Parent() @@ -260,9 +300,8 @@ func putUniqueIndexes( } objKey = objectKey(parentID, objKey) } - return updateSplitInfoIndex(tx, objKey, cnr, bucketName, si) 
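putExpirationEpoch above maintains a pair of indexes: a forward one (epoch|container|object -> zero value) that iterateExpired scans in epoch order, and a per-container reverse one (object -> epoch) for point lookups. Only the forward key needs an order-preserving epoch encoding; the reverse epoch is a value, which is why the hunk can store it little-endian. A sketch of the two shapes, with illustrative key layouts:

package main

import (
	"encoding/binary"
	"fmt"
)

// expirationIndexPair builds the forward key plus the reverse
// key/value for one object's expiration epoch.
func expirationIndexPair(epoch uint64, cnr, obj string) (fwdKey, revKey, revVal []byte) {
	fwdKey = binary.BigEndian.AppendUint64(nil, epoch)
	fwdKey = append(append(fwdKey, cnr...), obj...)
	revKey = []byte(obj)
	revVal = binary.LittleEndian.AppendUint64(nil, epoch)
	return fwdKey, revKey, revVal
}

func main() {
	f, rk, rv := expirationIndexPair(100, "cnr", "obj")
	fmt.Printf("fwd=%x rev(%s)=%x\n", f, rk, rv)
}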
+ return updateSplitInfoIndex(tx, objKey, addr.Container(), bucketName, si) } - return nil } @@ -297,18 +336,6 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun objKey := objectKey(idObj, make([]byte, objectKeySize)) bucketName := make([]byte, bucketKeySize) - cs, _ := obj.PayloadChecksum() - - // index payload hashes - err := f(tx, namedBucketItem{ - name: payloadHashBucketName(cnr, bucketName), - key: cs.Value(), - val: objKey, - }) - if err != nil { - return err - } - idParent, ok := obj.ParentID() // index parent ids @@ -373,21 +400,23 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun return nil } +var indexedAttributes = map[string]struct{}{ + "S3-Access-Box-CRDT-Name": {}, + objectSDK.AttributeFilePath: {}, +} + +// IsAtrributeIndexed returns True if attribute is indexed by metabase. +func IsAtrributeIndexed(attr string) bool { + _, found := indexedAttributes[attr] + return found +} + func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error { id, _ := obj.ID() cnr, _ := obj.ContainerID() objKey := objectKey(id, make([]byte, objectKeySize)) key := make([]byte, bucketKeySize) - err := f(tx, namedBucketItem{ - name: ownerBucketName(cnr, key), - key: []byte(obj.OwnerID().EncodeToString()), - val: objKey, - }) - if err != nil { - return err - } - var attrs []objectSDK.Attribute if obj.ECHeader() != nil { attrs = obj.ECHeader().ParentAttributes() @@ -398,6 +427,9 @@ func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun // user specified attributes for i := range attrs { + if !IsAtrributeIndexed(attrs[i].Key()) { + continue + } key = attributeBucketName(cnr, attrs[i].Key(), key) err := f(tx, namedBucketItem{ name: key, @@ -412,6 +444,20 @@ func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun return nil } +func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) { + attributes := obj.Attributes() + if ech := obj.ECHeader(); ech != nil { + attributes = ech.ParentAttributes() + } + for _, attr := range attributes { + if attr.Key() == objectV2.SysAttributeExpEpoch { + expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64) + return expEpoch, err == nil + } + } + return 0, false +} + type bucketContainer interface { Bucket([]byte) *bbolt.Bucket CreateBucket([]byte) (*bbolt.Bucket, error) @@ -428,7 +474,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } data, err := update(bkt.Get(item.key), item.val) @@ -445,12 +491,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } fkbtRoot, err := createBucketLikelyExists(bkt, item.key) if err != nil { - return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err) + return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err) } return fkbtRoot.Put(item.val, zeroValue) @@ -459,19 +505,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) 
error { func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } lst, err := decodeList(bkt.Get(item.key)) if err != nil { - return fmt.Errorf("can't decode leaf list %v: %w", item.key, err) + return fmt.Errorf("decode leaf list %v: %w", item.key, err) } lst = append(lst, item.val) encodedLst, err := encodeList(lst) if err != nil { - return fmt.Errorf("can't encode leaf list %v: %w", item.key, err) + return fmt.Errorf("encode leaf list %v: %w", item.key, err) } return bkt.Put(item.key, encodedLst) diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go index 84e4029f2..f37ed4cf2 100644 --- a/pkg/local_object_storage/metabase/put_test.go +++ b/pkg/local_object_storage/metabase/put_test.go @@ -46,7 +46,7 @@ func BenchmarkPut(b *testing.B) { db := newDB(b, meta.WithMaxBatchDelay(time.Millisecond*10), meta.WithMaxBatchSize(runtime.NumCPU())) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() // Ensure the benchmark is bound by CPU and not waiting batch-delay time. b.SetParallelism(1) @@ -68,13 +68,13 @@ func BenchmarkPut(b *testing.B) { db := newDB(b, meta.WithMaxBatchDelay(time.Millisecond*10), meta.WithMaxBatchSize(1)) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() var index atomic.Int64 index.Store(-1) objs := prepareObjects(b.N) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { if err := metaPut(db, objs[index.Add(1)], nil); err != nil { b.Fatal(err) } @@ -84,7 +84,7 @@ func BenchmarkPut(b *testing.B) { func TestDB_PutBlobovniczaUpdate(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw1 := testutil.GenerateObject() storageID := []byte{1, 2, 3, 4} diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 66f5eefc6..5f0956f0b 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -30,14 +30,14 @@ func TestResetDropsContainerBuckets(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() - for idx := 0; idx < 100; idx++ { + for idx := range 100 { var putPrm PutPrm putPrm.SetObject(testutil.GenerateObject()) - putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx))) + putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx)) _, err := db.Put(context.Background(), putPrm) require.NoError(t, err) } diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 3a4d7a227..60da50671 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -8,17 +8,15 @@ import ( "strings" "time" - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" 
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" ) type ( @@ -37,8 +35,9 @@ type ( // SelectPrm groups the parameters of Select operation. type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters + cnr cid.ID + filters objectSDK.SearchFilters + useAttributeIndex bool } // SelectRes groups the resulting values of Select operation. @@ -56,6 +55,10 @@ func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) { p.filters = fs } +func (p *SelectPrm) SetUseAttributeIndex(v bool) { + p.useAttributeIndex = v +} + // AddressList returns list of addresses of the selected objects. func (r SelectRes) AddressList() []oid.Address { return r.addrList @@ -92,14 +95,14 @@ func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err err currEpoch := db.epochState.CurrentEpoch() return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch) + res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex) success = err == nil return err })) } -func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64) ([]oid.Address, error) { - group, err := groupFilters(fs) +func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) { + group, err := groupFilters(fs, useAttributeIndex) if err != nil { return nil, err } @@ -128,6 +131,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters res := make([]oid.Address, 0, len(mAddr)) + bc := newBucketCache() for a, ind := range mAddr { if ind != expLen { continue // ignore objects with unmatched fast filters @@ -142,12 +146,16 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters var addr oid.Address addr.SetContainer(cnr) addr.SetObject(id) - - if objectStatus(tx, addr, currEpoch) > 0 { + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) + if err != nil { + return nil, err + } + if st > 0 { continue // ignore removed objects } - if !db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) { + addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch) + if !match { continue // ignore objects with unmatched slow filters } @@ -195,12 +203,6 @@ func (db *DB) selectFastFilter( switch f.Header() { case v2object.FilterHeaderObjectID: db.selectObjectID(tx, f, cnr, to, fNum, currEpoch) - case v2object.FilterHeaderOwnerID: - bucketName := ownerBucketName(cnr, bucketName) - db.selectFromFKBT(tx, bucketName, f, to, fNum) - case v2object.FilterHeaderPayloadHash: - bucketName := payloadHashBucketName(cnr, bucketName) - db.selectFromList(tx, bucketName, f, to, fNum) case v2object.FilterHeaderObjectType: for _, bucketName := range bucketNamesForType(cnr, f.Operation(), f.Value()) { selectAllFromBucket(tx, bucketName, to, fNum) @@ -222,7 +224,6 @@ func (db *DB) selectFastFilter( selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum) default: // user attribute bucketName := attributeBucketName(cnr, f.Header(), bucketName) - if f.Operation() == 
objectSDK.MatchNotPresent { selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum) } else { @@ -243,7 +244,6 @@ func allBucketNames(cnr cid.ID) (names [][]byte) { names = append(names, fn(cnr, make([]byte, bucketKeySize))) } } - return } @@ -278,8 +278,6 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str return } -// selectFromList looks into index to find list of addresses to add in -// resulting cache. func (db *DB) selectFromFKBT( tx *bbolt.Tx, name []byte, // fkbt root bucket name @@ -289,8 +287,6 @@ func (db *DB) selectFromFKBT( ) { // matchFunc, ok := db.matchers[f.Operation()] if !ok { - db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation()))) - return } @@ -299,7 +295,7 @@ func (db *DB) selectFromFKBT( return } - err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error { + _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error { fkbtLeaf := fkbtRoot.Bucket(k) if fkbtLeaf == nil { return nil @@ -311,9 +307,6 @@ func (db *DB) selectFromFKBT( return nil }) }) - if err != nil { - db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error())) - } } // selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in @@ -378,24 +371,17 @@ func (db *DB) selectFromList( case objectSDK.MatchStringEqual: lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value()))) if err != nil { - db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error())) return } default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op))) - return } if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error { l, err := decodeList(val) if err != nil { - db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, - zap.String("error", err.Error()), - ) - return err } @@ -403,10 +389,6 @@ func (db *DB) selectFromList( return nil }); err != nil { - db.log.Debug(logs.MetabaseCantIterateOverTheBucket, - zap.String("error", err.Error()), - ) - return } } @@ -448,10 +430,6 @@ func (db *DB) selectObjectID( default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug(logs.MetabaseUnknownOperation, - zap.Uint32("operation", uint32(f.Operation())), - ) - return } @@ -462,70 +440,122 @@ func (db *DB) selectObjectID( return } - err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error { + _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error { var id oid.ID if err := id.Decode(k); err == nil { appendOID(id) } return nil }) - if err != nil { - db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets, - zap.String("error", err.Error()), - ) - } } } } // matchSlowFilters return true if object header is matched by all slow filters. 
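// Illustrative caller sketch (names mirror selectObjects above; bc is the
// per-request bucketCache): the second return value reports the match, and the
// returned address may be rewritten to the EC parent when a user attribute
// matched via a chunk's parent header.
//
//	addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
//	if !match {
//		continue // at least one slow filter did not match the header
//	}
//	res = append(res, addr) // possibly the EC parent address, not the chunk's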
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) bool { +func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { + result := addr if len(f) == 0 { - return true + return result, true } - buf := make([]byte, addressKeySize) - obj, err := db.get(tx, addr, buf, true, false, currEpoch) + obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch) if err != nil { - return false + return result, false } for i := range f { - matchFunc, ok := db.matchers[f[i].Operation()] - if !ok { - return false - } - var data []byte - switch f[i].Header() { case v2object.FilterHeaderVersion: data = []byte(obj.Version().String()) case v2object.FilterHeaderHomomorphicHash: + if isECChunk { + return result, false // EC chunk and EC parent hashes are incomparable + } cs, _ := obj.PayloadHomomorphicHash() data = cs.Value() case v2object.FilterHeaderCreationEpoch: data = make([]byte, 8) binary.LittleEndian.PutUint64(data, obj.CreationEpoch()) case v2object.FilterHeaderPayloadLength: + if isECChunk { + return result, false // EC chunk and EC parent payload lengths are incomparable + } data = make([]byte, 8) binary.LittleEndian.PutUint64(data, obj.PayloadSize()) - default: - continue // ignore unknown search attributes + case v2object.FilterHeaderOwnerID: + data = []byte(obj.OwnerID().EncodeToString()) + case v2object.FilterHeaderPayloadHash: + if isECChunk { + return result, false // EC chunk and EC parent payload hashes are incomparable + } + cs, _ := obj.PayloadChecksum() + data = cs.Value() + default: // user attribute + v, ok := attributeValue(obj, f[i].Header()) + if ok { + if ech := obj.ECHeader(); ech != nil { + result.SetObject(ech.Parent()) + } + data = []byte(v) + } else { + return result, f[i].Operation() == objectSDK.MatchNotPresent + } + } + + matchFunc, ok := db.matchers[f[i].Operation()] + if !ok { + return result, false } if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) { - return false + return result, false } } - return true + return result, true +} + +func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { + buf := make([]byte, addressKeySize) + obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch) + if err != nil { + var ecInfoError *objectSDK.ECInfoError + if errors.As(err, &ecInfoError) { + for _, chunk := range ecInfoError.ECInfo().Chunks { + var objID oid.ID + if err = objID.ReadFromV2(chunk.ID); err != nil { + continue + } + addr.SetObject(objID) + obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch) + if err == nil { + return obj, true, nil + } + } + } + return nil, false, err + } + return obj, false, nil +} + +func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) { + objectAttributes := obj.Attributes() + if ech := obj.ECHeader(); ech != nil { + objectAttributes = ech.ParentAttributes() + } + for _, attr := range objectAttributes { + if attr.Key() == attribute { + return attr.Value(), true + } + } + return "", false } // groupFilters divides filters in two groups: fast and slow. Fast filters // processed by indexes and slow filters processed after by unmarshaling // object headers. 
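// A sketch of the resulting split, assuming the hypothetical attribute "color"
// is not in indexedAttributes while FilePath is:
//
//	fs := objectSDK.SearchFilters{}
//	fs.AddRootFilter()                                                           // fast: property filter
//	fs.AddFilter(objectSDK.AttributeFilePath, "/a", objectSDK.MatchCommonPrefix) // fast only when useAttributeIndex is true
//	fs.AddFilter("color", "red", objectSDK.MatchStringEqual)                     // slow: unindexed user attribute
//	group, err := groupFilters(fs, true)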
-func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) { +func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) { res := filterGroup{ fastFilters: make(objectSDK.SearchFilters, 0, len(filters)), slowFilters: make(objectSDK.SearchFilters, 0, len(filters)), @@ -536,18 +566,25 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) { case v2object.FilterHeaderContainerID: // support deprecated field err := res.cnr.DecodeString(filters[i].Value()) if err != nil { - return filterGroup{}, fmt.Errorf("can't parse container id: %w", err) + return filterGroup{}, fmt.Errorf("parse container id: %w", err) } res.withCnrFilter = true - case // slow filters - v2object.FilterHeaderVersion, - v2object.FilterHeaderCreationEpoch, - v2object.FilterHeaderPayloadLength, - v2object.FilterHeaderHomomorphicHash: - res.slowFilters = append(res.slowFilters, filters[i]) - default: // fast filters or user attributes if unknown + case // fast filters + v2object.FilterHeaderObjectID, + v2object.FilterHeaderObjectType, + v2object.FilterHeaderParent, + v2object.FilterHeaderSplitID, + v2object.FilterHeaderECParent, + v2object.FilterPropertyRoot, + v2object.FilterPropertyPhy: res.fastFilters = append(res.fastFilters, filters[i]) + default: + if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) { + res.fastFilters = append(res.fastFilters, filters[i]) + } else { + res.slowFilters = append(res.slowFilters, filters[i]) + } } } diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 8f9294d07..ce2156d2e 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -7,10 +7,10 @@ import ( "strconv" "testing" - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -26,9 +26,19 @@ import ( func TestDB_SelectUserAttributes(t *testing.T) { t.Parallel() + t.Run("with_index", func(t *testing.T) { + testSelectUserAttributes(t, true) + }) + t.Run("without_index", func(t *testing.T) { + testSelectUserAttributes(t, false) + }) +} + +func testSelectUserAttributes(t *testing.T, index bool) { + t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -36,124 +46,161 @@ func TestDB_SelectUserAttributes(t *testing.T) { testutil.AddAttribute(raw1, "foo", "bar") testutil.AddAttribute(raw1, "x", "y") - err := putBig(db, raw1) + var putPrm meta.PutPrm + putPrm.SetIndexAttributes(index) + putPrm.SetObject(raw1) + _, err := db.Put(context.Background(), putPrm) require.NoError(t, err) raw2 := testutil.GenerateObjectWithCID(cnr) testutil.AddAttribute(raw2, "foo", "bar") testutil.AddAttribute(raw2, "x", "z") - err = putBig(db, raw2) + putPrm.SetObject(raw2) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw3 := testutil.GenerateObjectWithCID(cnr) testutil.AddAttribute(raw3, "a", "b") - err = 
putBig(db, raw3) + putPrm.SetObject(raw3) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw4 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw4, "path", "test/1/2") + testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2") - err = putBig(db, raw4) + putPrm.SetObject(raw4) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw5 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw5, "path", "test/1/3") + testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3") - err = putBig(db, raw5) + putPrm.SetObject(raw5) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw6 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw6, "path", "test/2/3") + testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3") - err = putBig(db, raw6) + putPrm.SetObject(raw6) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) + raw7 := testutil.GenerateObjectWithCID(cnr) + var attr objectSDK.Attribute + attr.SetKey(objectSDK.AttributeFilePath) + attr.SetValue("/test/3/4") + attrs := raw7.Attributes() + attrs = append(attrs, attr) + ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{ + ID: oidtest.ID(), + Attributes: attrs, + }, 0, 3, []byte{}, 0) + raw7.SetECHeader(ech) + putPrm.SetObject(raw7) + _, err = db.Put(context.Background(), putPrm) + require.NoError(t, err) + var raw7Parent oid.Address + raw7Parent.SetContainer(cnr) + raw7Parent.SetObject(ech.Parent()) + fs := objectSDK.SearchFilters{} fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), ) fs = objectSDK.SearchFilters{} fs.AddFilter("x", "y", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1)) fs = objectSDK.SearchFilters{} fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual) - testSelect(t, db, cnr, fs, object.AddressOf(raw2)) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2)) fs = objectSDK.SearchFilters{} fs.AddFilter("a", "b", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, object.AddressOf(raw3)) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3)) fs = objectSDK.SearchFilters{} fs.AddFilter("c", "d", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs) + testSelect2(t, db, cnr, fs, index) fs = objectSDK.SearchFilters{} fs.AddFilter("foo", "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3), object.AddressOf(raw4), object.AddressOf(raw5), object.AddressOf(raw6), + object.AddressOf(raw7), ) fs = objectSDK.SearchFilters{} fs.AddFilter("a", "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), object.AddressOf(raw4), object.AddressOf(raw5), object.AddressOf(raw6), + object.AddressOf(raw7), ) fs = objectSDK.SearchFilters{} - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), object.AddressOf(raw3), object.AddressOf(raw4), object.AddressOf(raw5), object.AddressOf(raw6), + object.AddressOf(raw7), ) fs = objectSDK.SearchFilters{} fs.AddFilter("key", "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), object.AddressOf(raw3), object.AddressOf(raw4), 
object.AddressOf(raw5), object.AddressOf(raw6), + object.AddressOf(raw7), ) fs = objectSDK.SearchFilters{} - fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix) - testSelect(t, db, cnr, fs, + fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw4), object.AddressOf(raw5), object.AddressOf(raw6), + raw7Parent, ) fs = objectSDK.SearchFilters{} - fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix) - testSelect(t, db, cnr, fs, + fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw4), object.AddressOf(raw5), ) + + fs = objectSDK.SearchFilters{} + fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual) + testSelect2(t, db, cnr, fs, index, + raw7Parent, + ) } func TestDB_SelectRootPhyParent(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -307,7 +354,7 @@ func TestDB_SelectInhume(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -325,11 +372,7 @@ func TestDB_SelectInhume(t *testing.T) { object.AddressOf(raw2), ) - var tombstone oid.Address - tombstone.SetContainer(cnr) - tombstone.SetObject(oidtest.ID()) - - err = metaInhume(db, object.AddressOf(raw2), tombstone) + err = metaInhume(db, object.AddressOf(raw2), oidtest.ID()) require.NoError(t, err) fs = objectSDK.SearchFilters{} @@ -342,7 +385,7 @@ func TestDB_SelectPayloadHash(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -413,7 +456,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -521,7 +564,7 @@ func TestDB_SelectObjectID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -633,6 +676,160 @@ func TestDB_SelectObjectID(t *testing.T) { }) } +func TestDB_SelectOwnerID(t *testing.T) { + t.Parallel() + + db := newDB(t) + defer func() { require.NoError(t, db.Close(context.Background())) }() + + cnr := cidtest.ID() + + // prepare + + parent := testutil.GenerateObjectWithCID(cnr) + + regular := testutil.GenerateObjectWithCID(cnr) + idParent, _ := parent.ID() + regular.SetParentID(idParent) + regular.SetParent(parent) + + err := putBig(db, regular) + require.NoError(t, err) + + ts := testutil.GenerateObjectWithCID(cnr) + ts.SetType(objectSDK.TypeTombstone) + err = putBig(db, ts) + require.NoError(t, err) + + lock := testutil.GenerateObjectWithCID(cnr) + lock.SetType(objectSDK.TypeLock) + err = putBig(db, lock) + require.NoError(t, err) + + t.Run("not found objects", func(t *testing.T) { + raw := testutil.GenerateObjectWithCID(cnr) + + fs := objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, raw.OwnerID()) + + testSelect(t, db, cnr, fs) + + fs = objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, raw.OwnerID()) + + testSelect(t, db, cnr, fs, + 
object.AddressOf(regular), + object.AddressOf(parent), + object.AddressOf(ts), + object.AddressOf(lock), + ) + }) + + t.Run("regular objects", func(t *testing.T) { + fs := objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, regular.OwnerID()) + testSelect(t, db, cnr, fs, object.AddressOf(regular)) + + fs = objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, regular.OwnerID()) + testSelect(t, db, cnr, fs, + object.AddressOf(parent), + object.AddressOf(ts), + object.AddressOf(lock), + ) + }) + + t.Run("tombstone objects", func(t *testing.T) { + fs := objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, ts.OwnerID()) + testSelect(t, db, cnr, fs, object.AddressOf(ts)) + + fs = objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, ts.OwnerID()) + testSelect(t, db, cnr, fs, + object.AddressOf(regular), + object.AddressOf(parent), + object.AddressOf(lock), + ) + }) + + t.Run("parent objects", func(t *testing.T) { + fs := objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, parent.OwnerID()) + testSelect(t, db, cnr, fs, object.AddressOf(parent)) + + fs = objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, parent.OwnerID()) + testSelect(t, db, cnr, fs, + object.AddressOf(regular), + object.AddressOf(ts), + object.AddressOf(lock), + ) + }) + + t.Run("lock objects", func(t *testing.T) { + fs := objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, lock.OwnerID()) + testSelect(t, db, cnr, fs, object.AddressOf(lock)) + + fs = objectSDK.SearchFilters{} + fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, lock.OwnerID()) + testSelect(t, db, cnr, fs, + object.AddressOf(regular), + object.AddressOf(parent), + object.AddressOf(ts), + ) + }) +} + +func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) { + t.Parallel() + + db := newDB(t) + defer func() { require.NoError(t, db.Close(context.Background())) }() + + cnr := cidtest.ID() + ecChunk1 := oidtest.ID() + ecChunk2 := oidtest.ID() + ecParent := oidtest.ID() + var ecParentAddr oid.Address + ecParentAddr.SetContainer(cnr) + ecParentAddr.SetObject(ecParent) + var ecParentAttr []objectSDK.Attribute + var attr objectSDK.Attribute + attr.SetKey(objectSDK.AttributeFilePath) + attr.SetValue("/1/2/3") + ecParentAttr = append(ecParentAttr, attr) + + chunkObj := testutil.GenerateObjectWithCID(cnr) + chunkObj.SetID(ecChunk1) + chunkObj.SetPayload([]byte{0, 1, 2, 3, 4}) + chunkObj.SetPayloadSize(uint64(5)) + chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0)) + + chunkObj2 := testutil.GenerateObjectWithCID(cnr) + chunkObj2.SetID(ecChunk2) + chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) + chunkObj2.SetPayloadSize(uint64(10)) + chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0)) + + // put object with EC + + var prm meta.PutPrm + prm.SetObject(chunkObj) + _, err := db.Put(context.Background(), prm) + require.NoError(t, err) + + prm.SetObject(chunkObj2) + _, err = db.Put(context.Background(), prm) + require.NoError(t, err) + + fs := objectSDK.SearchFilters{} + fs.AddRootFilter() + fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix) + testSelect(t, db, cnr, fs, ecParentAddr) +} + type testTarget struct { objects []*objectSDK.Object } @@ -668,7 +865,7 @@ func 
TestDB_RawHead_SplitInfo(t *testing.T) { ) db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -709,7 +906,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde t.Run("first last, then linking", func(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() require.NoError(t, metaPut(db, lastPart, nil)) require.NoError(t, metaPut(db, linking, nil)) @@ -733,7 +930,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde }) t.Run("first linking, then last", func(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() require.NoError(t, metaPut(db, linking, nil)) require.NoError(t, metaPut(db, lastPart, nil)) @@ -757,7 +954,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde }) t.Run("only last part", func(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() require.NoError(t, metaPut(db, lastPart, nil)) @@ -787,7 +984,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) { ) db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -814,7 +1011,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) { ec, err := erasurecode.NewConstructor(dataCount, parityCount) require.NoError(t, err) - for i := 0; i < partCount; i++ { + for i := range partCount { cs, err := ec.Split(tt.objects[i], &pk.PrivateKey) require.NoError(t, err) @@ -855,7 +1052,7 @@ func TestDB_SelectSplitID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -912,7 +1109,7 @@ func TestDB_SelectContainerID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -960,11 +1157,11 @@ func TestDB_SelectContainerID(t *testing.T) { func BenchmarkSelect(b *testing.B) { const objCount = 1000 db := newDB(b) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() cid := cidtest.ID() - for i := 0; i < objCount; i++ { + for i := range objCount { var attr objectSDK.Attribute attr.SetKey("myHeader") attr.SetValue(strconv.Itoa(i)) @@ -1002,28 +1199,30 @@ func TestExpiredObjects(t *testing.T) { t.Parallel() db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { cidExp, _ := exp.ContainerID() cidNonExp, _ := nonExp.ContainerID() - objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}) + objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false) require.NoError(t, err) require.Empty(t, objs) // expired object should not be returned - objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}) + objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false) require.NoError(t, err) require.NotEmpty(t, objs) }) } 
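// A minimal sketch of opting into attribute-indexed selection from a caller;
// the setters are the same ones exercised by metaSelect below:
//
//	var prm meta.SelectPrm
//	prm.SetContainerID(cnr)
//	prm.SetFilters(fs)
//	prm.SetUseAttributeIndex(true) // route indexed attributes through fast filters
//	res, err := db.Select(context.Background(), prm)
//	addrs := res.AddressList()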
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) { + b.ReportAllocs() + var prm meta.SelectPrm prm.SetContainerID(cid) prm.SetFilters(fs) - for i := 0; i < b.N; i++ { + for range b.N { res, err := db.Select(context.Background(), prm) if err != nil { b.Fatal(err) } @@ -1034,10 +1233,11 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear } } -func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) { +func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) { var prm meta.SelectPrm prm.SetFilters(fs) prm.SetContainerID(cnr) + prm.SetUseAttributeIndex(useAttributeIndex) res, err := db.Select(context.Background(), prm) return res.AddressList(), err diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go index 88446494e..72618b1a0 100644 --- a/pkg/local_object_storage/metabase/shard_id.go +++ b/pkg/local_object_storage/metabase/shard_id.go @@ -2,6 +2,7 @@ package meta import ( "bytes" + "context" "errors" "fmt" "os" @@ -21,7 +22,7 @@ var ( // If id is missing, returns nil, nil. // // GetShardID does not report any metrics. -func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) { +func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) { db.modeMtx.Lock() defer db.modeMtx.Unlock() db.mode = mode @@ -30,14 +31,14 @@ func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) { return nil, nil } - if err := db.openDB(mode); err != nil { - return nil, fmt.Errorf("failed to open metabase: %w", err) + if err := db.openDB(ctx, mode); err != nil { + return nil, fmt.Errorf("open metabase: %w", err) } id, err := db.readShardID() if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr)) + err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) } return id, metaerr.Wrap(err) @@ -59,7 +60,7 @@ func (db *DB) readShardID() ([]byte, error) { // SetShardID sets metabase operation mode // and writes shard id to db. -func (db *DB) SetShardID(id []byte, mode metamode.Mode) error { +func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error { db.modeMtx.Lock() defer db.modeMtx.Unlock() db.mode = mode @@ -68,8 +69,8 @@ func (db *DB) SetShardID(id []byte, mode metamode.Mode) error { return ErrReadOnlyMode } - if err := db.openDB(mode); err != nil { - return fmt.Errorf("failed to open metabase: %w", err) + if err := db.openDB(ctx, mode); err != nil { + return fmt.Errorf("open metabase: %w", err) } err := db.writeShardID(id) @@ -78,7 +79,7 @@ func (db *DB) SetShardID(id []byte, mode metamode.Mode) error { } if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr)) + err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) } return metaerr.Wrap(err) diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go index 6d620b41a..8f2376503 100644 --- a/pkg/local_object_storage/metabase/storage_id.go +++ b/pkg/local_object_storage/metabase/storage_id.go @@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte { // StorageID returns the storage descriptor for objects from the blobstor. // It is stored together with the object and makes get/delete operations faster. 
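// Illustrative use (SetAddress is assumed from the usual Prm setter convention;
// it is not shown in this hunk):
//
//	var prm meta.StorageIDPrm
//	prm.SetAddress(addr)
//	res, err := db.StorageID(ctx, prm)
//	if err == nil {
//		storageID := res.StorageID() // nil when no storage descriptor is stored
//	}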
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) { +func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) { var ( startedAt = time.Now() success = false @@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes db.modeMtx.RLock() defer db.modeMtx.RUnlock() + var res StorageIDRes if db.mode.NoMetabase() { return res, ErrDegradedMode } - err = db.boltDB.View(func(tx *bbolt.Tx) error { - res.id, err = db.storageID(tx, prm.addr) - - return err + err := db.boltDB.View(func(tx *bbolt.Tx) error { + res.id = db.storageID(tx, prm.addr) + return nil }) success = err == nil return res, metaerr.Wrap(err) } -func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) { +func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte { key := make([]byte, bucketKeySize) smallBucket := tx.Bucket(smallBucketName(addr.Container(), key)) if smallBucket == nil { - return nil, nil + return nil } storageID := smallBucket.Get(objectKey(addr.Object(), key)) if storageID == nil { - return nil, nil + return nil } - return bytes.Clone(storageID), nil + return bytes.Clone(storageID) } // UpdateStorageIDPrm groups the parameters of UpdateStorageID operation. diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go index aaf6480ab..fef680159 100644 --- a/pkg/local_object_storage/metabase/storage_id_test.go +++ b/pkg/local_object_storage/metabase/storage_id_test.go @@ -15,7 +15,7 @@ func TestDB_StorageID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw1 := testutil.GenerateObject() raw2 := testutil.GenerateObject() @@ -43,7 +43,7 @@ func TestDB_StorageID(t *testing.T) { cnrID, ok := deleted.ContainerID() require.True(t, ok) ts := testutil.GenerateObjectWithCID(cnrID) - require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts))) + require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object())) // check StorageID for object without storageID fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2)) @@ -79,7 +79,7 @@ func TestPutWritecacheDataRace(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() putStorageID := []byte{1, 2, 3} wcStorageID := []byte{1, 2, 3, 4, 5} diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go new file mode 100644 index 000000000..4948f3424 --- /dev/null +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -0,0 +1,602 @@ +package meta + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "os" + "strconv" + "sync/atomic" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.etcd.io/bbolt" + "golang.org/x/sync/errgroup" +) + +const ( + upgradeLogFrequency = 50_000 + upgradeWorkersCount = 1_000 + compactMaxTxSize = 256 << 20 + upgradeTimeout = 1 * time.Second +) + +var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{ + 2: 
upgradeFromV2ToV3, + 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error { + log("metabase already upgraded") + return nil + }, +} + +func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error { + if _, err := os.Stat(path); err != nil { + return fmt.Errorf("check metabase existence: %w", err) + } + opts := bbolt.DefaultOptions + opts.Timeout = upgradeTimeout + db, err := bbolt.Open(path, os.ModePerm, opts) + if err != nil { + return fmt.Errorf("open metabase: %w", err) + } + var version uint64 + if err := db.View(func(tx *bbolt.Tx) error { + var e error + version, e = currentVersion(tx) + return e + }); err != nil { + return err + } + updater, found := updates[version] + if !found { + return fmt.Errorf("unsupported version %d: no update available", version) + } + if err := db.Update(func(tx *bbolt.Tx) error { + b := tx.Bucket(shardInfoBucket) + return b.Put(upgradeKey, zeroValue) + }); err != nil { + return fmt.Errorf("set upgrade key: %w", err) + } + if err := updater(ctx, db, cs, log); err != nil { + return fmt.Errorf("update metabase schema: %w", err) + } + if err := db.Update(func(tx *bbolt.Tx) error { + b := tx.Bucket(shardInfoBucket) + return b.Delete(upgradeKey) + }); err != nil { + return fmt.Errorf("delete upgrade key: %w", err) + } + if compact { + log("compacting metabase...") + err := compactDB(db) + if err != nil { + return fmt.Errorf("compact metabase: %w", err) + } + log("metabase compacted") + } + return db.Close() +} + +func compactDB(db *bbolt.DB) error { + sourcePath := db.Path() + tmpFileName := sourcePath + "." + time.Now().Format(time.RFC3339) + f, err := os.Stat(sourcePath) + if err != nil { + return err + } + dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{ + Timeout: 100 * time.Millisecond, + NoSync: true, + }) + if err != nil { + return fmt.Errorf("open new metabase to compact: %w", err) + } + if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil { + return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName))) + } + if err := dst.Sync(); err != nil { + return fmt.Errorf("sync compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName))) + } + if err := dst.Close(); err != nil { + return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName))) + } + if err := db.Close(); err != nil { + return fmt.Errorf("close source metabase: %w", errors.Join(err, os.Remove(tmpFileName))) + } + if err := os.Rename(tmpFileName, sourcePath); err != nil { + return fmt.Errorf("replace source metabase with compacted: %w", errors.Join(err, os.Remove(tmpFileName))) + } + return nil +} + +func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error { + if err := createExpirationEpochBuckets(ctx, db, log); err != nil { + return err + } + eg, ctx := errgroup.WithContext(ctx) + eg.Go(func() error { + return dropUserAttributes(ctx, db, cs, log) + }) + eg.Go(func() error { + return dropOwnerIDIndex(ctx, db, log) + }) + eg.Go(func() error { + return dropPayloadChecksumIndex(ctx, db, log) + }) + if err := eg.Wait(); err != nil { + return err + } + return db.Update(func(tx *bbolt.Tx) error { + return updateVersion(tx, version) + }) +} + +type objectIDToExpEpoch struct { + containerID cid.ID + objectID oid.ID + expirationEpoch uint64 +} + +func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { + log("filling expiration epoch 
buckets...") + if err := db.Update(func(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(expEpochToObjectBucketName) + return err + }); err != nil { + return err + } + objects := make(chan objectIDToExpEpoch) + eg, ctx := errgroup.WithContext(ctx) + eg.Go(func() error { + return selectObjectsWithExpirationEpoch(ctx, db, objects) + }) + var count atomic.Uint64 + for range upgradeWorkersCount { + eg.Go(func() error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case obj, ok := <-objects: + if !ok { + return nil + } + if err := db.Batch(func(tx *bbolt.Tx) error { + if err := putUniqueIndexItem(tx, namedBucketItem{ + name: expEpochToObjectBucketName, + key: expirationEpochKey(obj.expirationEpoch, obj.containerID, obj.objectID), + val: zeroValue, + }); err != nil { + return err + } + val := make([]byte, epochSize) + binary.LittleEndian.PutUint64(val, obj.expirationEpoch) + return putUniqueIndexItem(tx, namedBucketItem{ + name: objectToExpirationEpochBucketName(obj.containerID, make([]byte, bucketKeySize)), + key: objectKey(obj.objectID, make([]byte, objectKeySize)), + val: val, + }) + }); err != nil { + return err + } + } + if c := count.Add(1); c%upgradeLogFrequency == 0 { + log("expiration epoch filled for", c, "objects...") + } + } + }) + } + err := eg.Wait() + if err != nil { + log("expiration epoch buckets completed completed with error:", err) + return err + } + log("filling expiration epoch buckets completed successfully, total", count.Load(), "objects") + return nil +} + +func selectObjectsWithExpirationEpoch(ctx context.Context, db *bbolt.DB, objects chan objectIDToExpEpoch) error { + defer close(objects) + + const batchSize = 1000 + it := &objectsWithExpirationEpochBatchIterator{ + lastAttributeKey: usrAttrPrefix, + } + for { + if err := getNextObjectsWithExpirationEpochBatch(ctx, db, it, batchSize); err != nil { + return err + } + for _, item := range it.items { + select { + case <-ctx.Done(): + return ctx.Err() + case objects <- item: + } + } + + if len(it.items) < batchSize { + return nil + } + it.items = nil + } +} + +var ( + usrAttrPrefix = []byte{userAttributePrefix} + errBatchSizeLimit = errors.New("batch size limit") +) + +type objectsWithExpirationEpochBatchIterator struct { + lastAttributeKey []byte + lastAttributeValue []byte + lastAttrKeyValueItem []byte + items []objectIDToExpEpoch +} + +// - {prefix}{containerID}{attributeKey} <- bucket +// -- {attributeValue} <- bucket, expirationEpoch +// --- {objectID}: zeroValue <- record + +func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, it *objectsWithExpirationEpochBatchIterator, batchSize int) error { + seekAttrValue := it.lastAttributeValue + seekAttrKVItem := it.lastAttrKeyValueItem + err := db.View(func(tx *bbolt.Tx) error { + attrKeyC := tx.Cursor() + for attrKey, _ := attrKeyC.Seek(it.lastAttributeKey); attrKey != nil && bytes.HasPrefix(attrKey, usrAttrPrefix); attrKey, _ = attrKeyC.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if len(attrKey) <= 1+cidSize { + continue + } + attributeKey := string(attrKey[1+cidSize:]) + if attributeKey != objectV2.SysAttributeExpEpoch { + continue + } + var containerID cid.ID + if err := containerID.Decode(attrKey[1 : 1+cidSize]); err != nil { + return fmt.Errorf("decode container id from user attribute bucket: %w", err) + } + if err := iterateExpirationAttributeKeyBucket(ctx, tx.Bucket(attrKey), it, batchSize, containerID, attrKey, seekAttrValue, seekAttrKVItem); err != nil { + return err + } + 
seekAttrValue = nil + seekAttrKVItem = nil + } + return nil + }) + if err != nil && !errors.Is(err, errBatchSizeLimit) { + return err + } + return nil +} + +func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, it *objectsWithExpirationEpochBatchIterator, batchSize int, containerID cid.ID, attrKey, seekAttrValue, seekAttrKVItem []byte) error { + attrValueC := b.Cursor() + for attrValue, v := attrValueC.Seek(seekAttrValue); attrValue != nil; attrValue, v = attrValueC.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if v != nil { + continue // need to iterate over buckets, not records + } + expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64) + if err != nil { + return fmt.Errorf("parse expiration epoch: %w", err) + } + expirationEpochBucket := b.Bucket(attrValue) + attrKeyValueC := expirationEpochBucket.Cursor() + for attrKeyValueItem, v := attrKeyValueC.Seek(seekAttrKVItem); attrKeyValueItem != nil; attrKeyValueItem, v = attrKeyValueC.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if v == nil { + continue // need to iterate over records, not buckets + } + if bytes.Equal(it.lastAttributeKey, attrKey) && bytes.Equal(it.lastAttributeValue, attrValue) && bytes.Equal(it.lastAttrKeyValueItem, attrKeyValueItem) { + continue + } + var objectID oid.ID + if err := objectID.Decode(attrKeyValueItem); err != nil { + return fmt.Errorf("decode object id from container '%s' expiration epoch %d: %w", containerID, expirationEpoch, err) + } + it.lastAttributeKey = bytes.Clone(attrKey) + it.lastAttributeValue = bytes.Clone(attrValue) + it.lastAttrKeyValueItem = bytes.Clone(attrKeyValueItem) + it.items = append(it.items, objectIDToExpEpoch{ + containerID: containerID, + objectID: objectID, + expirationEpoch: expirationEpoch, + }) + if len(it.items) == batchSize { + return errBatchSizeLimit + } + } + seekAttrKVItem = nil + } + return nil +} + +func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error { + log("deleting user attribute buckets...") + const batch = 1000 + prefix := []byte{userAttributePrefix} + last := prefix + var count uint64 + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + var keys [][]byte + if err := db.View(func(tx *bbolt.Tx) error { + c := tx.Cursor() + for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() { + if bytes.Equal(last, k) { + continue + } + keys = append(keys, bytes.Clone(k)) + } + return nil + }); err != nil { + log("deleting user attribute buckets completed with an error:", err) + return err + } + if len(keys) == 0 { + log("deleting user attribute buckets completed successfully, deleted", count, "buckets") + return nil + } + last = keys[len(keys)-1] + cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys) + if err != nil { + log("deleting user attribute buckets completed with an error:", err) + return err + } + count += cnt + cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys) + if err != nil { + log("deleting user attribute buckets completed with an error:", err) + return err + } + count += cnt + log("deleted", count, "user attribute buckets") + } +} + +func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { + keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs) + if err != nil { + return 0, fmt.Errorf("select non indexed user attributes: %w", err) 
+ } + if err := db.Batch(func(tx *bbolt.Tx) error { + for _, k := range keysToDrop { + if err := tx.DeleteBucket(k); err != nil { + return err + } + } + return nil + }); err != nil { + return 0, fmt.Errorf("drop non indexed user attributes: %w", err) + } + return uint64(len(keysToDrop)), nil +} + +func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) { + var keysToDrop [][]byte + for _, key := range keys { + attr, ok := attributeFromAttributeBucket(key) + if !ok { + return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key)) + } + if !IsAtrributeIndexed(attr) { + keysToDrop = append(keysToDrop, key) + continue + } + contID, ok := cidFromAttributeBucket(key) + if !ok { + return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) + } + info, err := cs.Info(ctx, contID) + if err != nil { + return nil, err + } + if info.Removed || !info.Indexed { + keysToDrop = append(keysToDrop, key) + } + } + return keysToDrop, nil +} + +func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) { + var dropBuckets [][]byte + for _, key := range keys { + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + } + + if err := dropEmptyNestedBuckets(ctx, db, key); err != nil { + return 0, err + } + + empty, exists, err := bucketIsEmpty(db, key) + if err != nil { + return 0, err + } + if empty && exists { + dropBuckets = append(dropBuckets, key) + } + } + if len(dropBuckets) == 0 { + return 0, nil + } + if err := db.Batch(func(tx *bbolt.Tx) error { + for _, key := range dropBuckets { + if err := tx.DeleteBucket(key); err != nil { + return err + } + } + return nil + }); err != nil { + return 0, fmt.Errorf("drop empty user attributes buckets: %w", err) + } + return uint64(len(dropBuckets)), nil +} + +func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) { + var empty bool + var exists bool + if err := db.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(bucketKey) + if b == nil { + return nil + } + exists = true + empty = !hasAnyItem(b) + return nil + }); err != nil { + return false, false, fmt.Errorf("bucket empty check: %w", err) + } + return empty, exists, nil +} + +func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error { + var last []byte + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var dropBuckets [][]byte + var err error + dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last) + if err != nil { + return fmt.Errorf("select empty nested buckets: %w", err) + } + if len(dropBuckets) == 0 { + return nil + } + + if err := db.Batch(func(tx *bbolt.Tx) error { + rootBucket := tx.Bucket(rootBucketKey) + if rootBucket == nil { + return nil + } + for _, sb := range dropBuckets { + if err := rootBucket.DeleteBucket(sb); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("drop empty nested buckets: %w", err) + } + } +} + +func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) { + const batchSize = 1000 + var result [][]byte + if err := db.View(func(tx *bbolt.Tx) error { + rootBucket := tx.Bucket(rootBucketKey) + if rootBucket == nil { + return nil + } + c := rootBucket.Cursor() + for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + 
default: + } + + if bytes.Equal(last, k) { + continue + } + last = bytes.Clone(k) + if v != nil { // record + continue + } + nestedBucket := rootBucket.Bucket(k) + if nestedBucket == nil { + continue + } + if !hasAnyItem(nestedBucket) { + result = append(result, bytes.Clone(k)) + } + } + return nil + }); err != nil { + return nil, nil, err + } + return result, last, nil +} + +func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { + return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) { + log(append([]any{"owner ID index:"}, a...)...) + }) +} + +func dropPayloadChecksumIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { + return dropBucketsByPrefix(ctx, db, []byte{payloadHashPrefix}, func(a ...any) { + log(append([]any{"payload checksum:"}, a...)...) + }) +} + +func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log func(a ...any)) error { + log("deleting buckets...") + const batch = 1000 + var count uint64 + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + var keys [][]byte + if err := db.View(func(tx *bbolt.Tx) error { + c := tx.Cursor() + for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() { + keys = append(keys, bytes.Clone(k)) + } + return nil + }); err != nil { + log("deleting buckets completed with an error:", err) + return err + } + if len(keys) == 0 { + log("deleting buckets completed successfully, deleted", count, "buckets") + return nil + } + if err := db.Batch(func(tx *bbolt.Tx) error { + for _, k := range keys { + if err := tx.DeleteBucket(k); err != nil { + return err + } + } + return nil + }); err != nil { + log("deleting buckets completed with an error:", err) + return err + } + count += uint64(len(keys)) + log("deleted", count, "buckets") + } +} diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go new file mode 100644 index 000000000..c90de4dd6 --- /dev/null +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -0,0 +1,222 @@ +//go:build integration + +package meta + +import ( + "context" + "fmt" + "io" + "os" + "strconv" + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +const upgradeFilePath = "/path/to/metabase.v2" + +func TestUpgradeV2ToV3(t *testing.T) { + path := createTempCopy(t, upgradeFilePath) + defer func() { + require.NoError(t, os.Remove(path)) + }() + db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t))) + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion) + require.NoError(t, db.Close(context.Background())) + require.NoError(t, Upgrade(context.Background(), 
path, true, &testContainerInfoProvider{}, t.Log)) + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + require.NoError(t, db.Init(context.Background())) + require.NoError(t, db.Close(context.Background())) + fmt.Println() +} + +type testContainerInfoProvider struct{} + +func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) { + return container.Info{}, nil +} + +func createTempCopy(t *testing.T, path string) string { + src, err := os.Open(path) + require.NoError(t, err) + + tmpPath := upgradeFilePath + time.Now().Format(time.RFC3339) + dest, err := os.Create(tmpPath) + require.NoError(t, err) + + _, err = io.Copy(dest, src) + require.NoError(t, err) + + require.NoError(t, src.Close()) + require.NoError(t, dest.Close()) + + return tmpPath +} + +func TestGenerateMetabaseFile(t *testing.T) { + t.Skip("for generating db") + const ( + containersCount = 10_000 + simpleObjectsCount = 500_000 + complexObjectsCount = 500_000 // x2 + deletedByGCMarksCount = 100_000 + deletedByTombstoneCount = 100_000 // x2 + lockedCount = 100_000 // x2 + + allocSize = 128 << 20 + generateWorkersCount = 1_000 + minEpoch = 1_000 + maxFilename = 1_000 + maxStorageID = 10_000 + ) + + db := New(WithPath(upgradeFilePath), WithEpochState(epochState{e: minEpoch}), WithLogger(test.NewLogger(t))) + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + db.boltDB.AllocSize = allocSize + db.boltDB.NoSync = true + require.NoError(t, db.Init(context.Background())) + containers := make([]cid.ID, containersCount) + for i := range containers { + containers[i] = cidtest.ID() + } + oc, err := db.ObjectCounters() + require.NoError(t, err) + require.True(t, oc.IsZero()) + eg, ctx := errgroup.WithContext(context.Background()) + eg.SetLimit(generateWorkersCount) + // simple objects + for i := range simpleObjectsCount { + i := i + eg.Go(func() error { + obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) + _, err := db.Put(ctx, PutPrm{ + obj: obj, + id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), + }) + require.NoError(t, err) + return nil + }) + } + require.NoError(t, eg.Wait()) + db.log.Info(ctx, "simple objects generated") + eg, ctx = errgroup.WithContext(context.Background()) + eg.SetLimit(generateWorkersCount) + // complex objects + for i := range complexObjectsCount { + i := i + eg.Go(func() error { + parent := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + child := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + child.SetParent(parent) + idParent, _ := parent.ID() + child.SetParentID(idParent) + testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) + testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) + _, err := db.Put(ctx, PutPrm{ + obj: child, + }) + require.NoError(t, err) + return nil + }) + } + require.NoError(t, eg.Wait()) + db.log.Info(ctx, "complex objects generated") + eg, ctx = 
errgroup.WithContext(context.Background()) + eg.SetLimit(generateWorkersCount) + // simple objects deleted by gc marks + for i := range deletedByGCMarksCount { + i := i + eg.Go(func() error { + obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) + _, err := db.Put(ctx, PutPrm{ + obj: obj, + id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), + }) + require.NoError(t, err) + _, err = db.Inhume(ctx, InhumePrm{ + target: []oid.Address{object.AddressOf(obj)}, + }) + require.NoError(t, err) + return nil + }) + } + require.NoError(t, eg.Wait()) + db.log.Info(ctx, "simple objects deleted by gc marks generated") + eg, ctx = errgroup.WithContext(context.Background()) + eg.SetLimit(10000) + // simple objects deleted by tombstones + for i := range deletedByTombstoneCount { + i := i + eg.Go(func() error { + obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) + _, err := db.Put(ctx, PutPrm{ + obj: obj, + id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), + }) + tomb := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + tomb.SetType(objectSDK.TypeTombstone) + _, err = db.Put(ctx, PutPrm{ + obj: tomb, + id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), + }) + require.NoError(t, err) + tombAddr := object.AddressOf(tomb) + _, err = db.Inhume(ctx, InhumePrm{ + target: []oid.Address{object.AddressOf(obj)}, + tomb: &tombAddr, + }) + require.NoError(t, err) + return nil + }) + } + require.NoError(t, eg.Wait()) + db.log.Info(ctx, "simple objects deleted by tombstones generated") + eg, ctx = errgroup.WithContext(context.Background()) + eg.SetLimit(generateWorkersCount) + // simple objects locked by locks + for i := range lockedCount { + i := i + eg.Go(func() error { + obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) + _, err := db.Put(ctx, PutPrm{ + obj: obj, + id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), + }) + lock := testutil.GenerateObjectWithCID(containers[i%len(containers)]) + lock.SetType(objectSDK.TypeLock) + testutil.AddAttribute(lock, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) + _, err = db.Put(ctx, PutPrm{ + obj: lock, + id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), + }) + require.NoError(t, err) + err = db.Lock(ctx, containers[i%len(containers)], object.AddressOf(lock).Object(), []oid.ID{object.AddressOf(obj).Object()}) + require.NoError(t, err) + return nil + }) + } + require.NoError(t, eg.Wait()) + db.log.Info(ctx, "simple objects locked by locks generated") + require.NoError(t, db.boltDB.Sync()) + require.NoError(t, db.Close(context.Background())) +} diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 9249ae49b..4ad83332b 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -1,10 +1,12 @@ package meta import ( - "bytes" "crypto/sha256" + "encoding/binary" "errors" + "fmt" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -23,6 +25,7 @@ var ( toMoveItBucketName = []byte{toMoveItPrefix} containerVolumeBucketName = []byte{containerVolumePrefix} containerCounterBucketName = []byte{containerCountersPrefix} + expEpochToObjectBucketName = []byte{expirationEpochToObjectPrefix} zeroValue = []byte{0xFF} @@ -89,11 +92,12 @@ const ( // FKBT index buckets. // ==================== - // ownerPrefix is used for prefixing FKBT index buckets mapping owner to object IDs. + // ownerPrefix was used for prefixing FKBT index buckets mapping owner to object IDs. // Key: owner ID // Value: bucket containing object IDs as keys + // removed in version 3 ownerPrefix - // userAttributePrefix is used for prefixing FKBT index buckets containing objects. + // userAttributePrefix was used for prefixing FKBT index buckets containing objects. // Key: attribute value // Value: bucket containing object IDs as keys userAttributePrefix @@ -102,9 +106,10 @@ const ( // List index buckets. // ==================== - // payloadHashPrefix is used for prefixing List index buckets mapping payload hash to a list of object IDs. + // payloadHashPrefix was used for prefixing List index buckets mapping payload hash to a list of object IDs. // Key: payload hash // Value: list of object IDs + // removed in version 3 payloadHashPrefix // parentPrefix is used for prefixing List index buckets mapping parent ID to a list of children IDs. // Key: parent ID @@ -124,6 +129,16 @@ const ( // Key: container ID + type // Value: Object id ecInfoPrefix + + // expirationEpochToObjectPrefix is used for storing relation between expiration epoch and object id. + // Key: expiration epoch + object address + // Value: zero + expirationEpochToObjectPrefix + + // objectToExpirationEpochPrefix is used for storing relation between expiration epoch and object id. + // Key: object address + // Value: expiration epoch + objectToExpirationEpochPrefix ) const ( @@ -131,6 +146,7 @@ const ( bucketKeySize = 1 + cidSize objectKeySize = sha256.Size addressKeySize = cidSize + objectKeySize + epochSize = 8 ) func bucketName(cnr cid.ID, prefix byte, key []byte) []byte { @@ -154,25 +170,26 @@ func smallBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, smallPrefix, key) } -// attributeBucketName returns _attr_. +// attributeBucketName returns _. func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte { key[0] = userAttributePrefix cnr.Encode(key[1:]) return append(key[:bucketKeySize], attributeKey...) } -// returns from attributeBucketName result, nil otherwise. -func cidFromAttributeBucket(val []byte, attributeKey string) []byte { - if len(val) < bucketKeySize || val[0] != userAttributePrefix || !bytes.Equal(val[bucketKeySize:], []byte(attributeKey)) { - return nil +func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) { + if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix { + return cid.ID{}, false } - - return val[1:bucketKeySize] + var result cid.ID + return result, result.Decode(bucketName[1:bucketKeySize]) == nil } -// payloadHashBucketName returns _payloadhash. 
-func payloadHashBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, payloadHashPrefix, key) +func attributeFromAttributeBucket(bucketName []byte) (string, bool) { + if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix { + return "", false + } + return string(bucketName[bucketKeySize:]), true } // rootBucketName returns <CID>_root. @@ -180,11 +197,6 @@ func rootBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, rootPrefix, key) } -// ownerBucketName returns <CID>_ownerid. -func ownerBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, ownerPrefix, key) -} - // parentBucketName returns <CID>_parent. func parentBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, parentPrefix, key) @@ -200,6 +212,35 @@ func ecInfoBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, ecInfoPrefix, key) } +// objectToExpirationEpochBucketName returns objectToExpirationEpochPrefix_<CID>. +func objectToExpirationEpochBucketName(cnr cid.ID, key []byte) []byte { + return bucketName(cnr, objectToExpirationEpochPrefix, key) +} + +func expirationEpochKey(epoch uint64, cnr cid.ID, obj oid.ID) []byte { + result := make([]byte, epochSize+addressKeySize) + binary.BigEndian.PutUint64(result, epoch) + cnr.Encode(result[epochSize:]) + obj.Encode(result[epochSize+cidSize:]) + return result +} + +func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) { + if len(key) != epochSize+addressKeySize { + return 0, cid.ID{}, oid.ID{}, fmt.Errorf("unexpected expiration epoch to object key length: %d", len(key)) + } + epoch := binary.BigEndian.Uint64(key) + var cnr cid.ID + if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil { + return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err) + } + var obj oid.ID + if err := obj.Decode(key[epochSize+cidSize:]); err != nil { + return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err) + } + return epoch, cnr, obj, nil +} + // addressKey returns key for K-V tables when key is a whole address. func addressKey(addr oid.Address, key []byte) []byte { addr.Container().Encode(key) @@ -238,9 +279,7 @@ func objectKey(obj oid.ID, key []byte) []byte { // // firstIrregularObjectType(tx, cnr, obj) usage allows getting object type. func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type { - if len(objs) == 0 { - panic("empty object list in firstIrregularObjectType") - } + assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType") var keys [2][1 + cidSize]byte diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go index 5748b64ee..fbc0f1ad9 100644 --- a/pkg/local_object_storage/metabase/version.go +++ b/pkg/local_object_storage/metabase/version.go @@ -2,6 +2,7 @@ package meta import ( "encoding/binary" + "errors" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -9,15 +10,22 @@ import ( ) // version contains current metabase version. -const version = 2 +const version = 3 -var versionKey = []byte("version") +var ( + versionKey = []byte("version") + upgradeKey = []byte("upgrade") +) // ErrOutdatedVersion is returned on initializing // an existing metabase that is not compatible with // the current code version.
var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required") +var ErrIncompletedUpgrade = logicerr.New("metabase upgrade is not completed") + +var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket") + func checkVersion(tx *bbolt.Tx, initialized bool) error { var knownVersion bool @@ -32,6 +40,10 @@ func checkVersion(tx *bbolt.Tx, initialized bool) error { return fmt.Errorf("%w: expected=%d, stored=%d", ErrOutdatedVersion, version, stored) } } + data = b.Get(upgradeKey) + if len(data) > 0 { + return ErrIncompletedUpgrade + } } if !initialized { @@ -55,7 +67,19 @@ func updateVersion(tx *bbolt.Tx, version uint64) error { b, err := tx.CreateBucketIfNotExists(shardInfoBucket) if err != nil { - return fmt.Errorf("can't create auxiliary bucket: %w", err) + return fmt.Errorf("create auxiliary bucket: %w", err) } return b.Put(versionKey, data) } + +func currentVersion(tx *bbolt.Tx) (uint64, error) { + b := tx.Bucket(shardInfoBucket) + if b == nil { + return 0, errVersionUndefinedNoInfoBucket + } + data := b.Get(versionKey) + if len(data) != 8 { + return 0, fmt.Errorf("version undefined: invalid version data length %d", len(data)) + } + return binary.LittleEndian.Uint64(data), nil +} diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go index b2af428ff..b373fb32e 100644 --- a/pkg/local_object_storage/metabase/version_test.go +++ b/pkg/local_object_storage/metabase/version_test.go @@ -45,25 +45,25 @@ func TestVersion(t *testing.T) { t.Run("simple", func(t *testing.T) { db := newDB(t) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) t.Run("reopen", func(t *testing.T) { require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) }) }) t.Run("old data", func(t *testing.T) { db := newDB(t) - require.NoError(t, db.SetShardID([]byte{1, 2, 3, 4}, mode.ReadWrite)) + require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite)) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) }) t.Run("invalid version", func(t *testing.T) { db := newDB(t) @@ -71,17 +71,37 @@ func TestVersion(t *testing.T) { require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { return updateVersion(tx, version+1) })) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.Error(t, db.Init()) - require.NoError(t, db.Close()) + require.Error(t, db.Init(context.Background())) + require.NoError(t, db.Close(context.Background())) t.Run("reset", func(t *testing.T) { require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) require.NoError(t, db.Reset()) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) }) }) + t.Run("incompleted upgrade", func(t *testing.T) { + db := newDB(t) + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + 
require.NoError(t, db.Init(context.Background())) + require.NoError(t, db.Close(context.Background())) + + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { + return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue) + })) + require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade) + require.NoError(t, db.Close(context.Background())) + + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { + return tx.Bucket(shardInfoBucket).Delete(upgradeKey) + })) + require.NoError(t, db.Init(context.Background())) + require.NoError(t, db.Close(context.Background())) + }) } diff --git a/pkg/local_object_storage/metrics/fstree.go b/pkg/local_object_storage/metrics/fstree.go index 76822ac2c..d93363fa3 100644 --- a/pkg/local_object_storage/metrics/fstree.go +++ b/pkg/local_object_storage/metrics/fstree.go @@ -38,6 +38,10 @@ func (m *fstreeMetrics) Iterate(d time.Duration, success bool) { m.m.MethodDuration(m.shardID, m.path, "Iterate", d, success) } +func (m *fstreeMetrics) IterateInfo(d time.Duration, success bool) { + m.m.MethodDuration(m.shardID, m.path, "IterateInfo", d, success) +} + func (m *fstreeMetrics) Delete(d time.Duration, success bool) { m.m.MethodDuration(m.shardID, m.path, "Delete", d, success) } diff --git a/pkg/local_object_storage/pilorama/batch.go b/pkg/local_object_storage/pilorama/batch.go index 520c6dfb4..4c5238921 100644 --- a/pkg/local_object_storage/pilorama/batch.go +++ b/pkg/local_object_storage/pilorama/batch.go @@ -1,9 +1,9 @@ package pilorama import ( + "cmp" "encoding/binary" "slices" - "sort" "sync" "time" @@ -48,8 +48,8 @@ func (b *batch) run() { // Sorting without a mutex is ok, because we append to this slice only if timer is non-nil. // See (*boltForest).addBatch for details. 
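// A minimal illustration of the sort-then-compact step below, with hypothetical timestamps:
//
//	ops := []*Move{{Meta: Meta{Time: 3}}, {Meta: Meta{Time: 1}}, {Meta: Meta{Time: 3}}}
//	slices.SortFunc(ops, func(a, b *Move) int { return cmp.Compare(a.Time, b.Time) })
//	ops = slices.CompactFunc(ops, func(x, y *Move) bool { return x.Time == y.Time })
//	// ops now holds Times [1, 3]: entries sharing a logical timestamp describe the same
//	// replicated move, so applying one copy is enough.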
- sort.Slice(b.operations, func(i, j int) bool { - return b.operations[i].Time < b.operations[j].Time + slices.SortFunc(b.operations, func(mi, mj *Move) int { + return cmp.Compare(mi.Time, mj.Time) }) b.operations = slices.CompactFunc(b.operations, func(x, y *Move) bool { return x.Time == y.Time }) diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go index 22b951a41..3156751f2 100644 --- a/pkg/local_object_storage/pilorama/bench_test.go +++ b/pkg/local_object_storage/pilorama/bench_test.go @@ -28,8 +28,8 @@ func BenchmarkCreate(b *testing.B) { WithPath(filepath.Join(tmpDir, "test.db")), WithMaxBatchSize(runtime.GOMAXPROCS(0))) require.NoError(b, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(b, f.Init()) - defer func() { require.NoError(b, f.Close()) }() + require.NoError(b, f.Init(context.Background())) + defer func() { require.NoError(b, f.Close(context.Background())) }() b.Cleanup(func() { require.NoError(b, os.RemoveAll(tmpDir)) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 48363ceac..897b37ea0 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "slices" - "sort" "strconv" "sync" "time" @@ -92,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage { return &b } -func (t *boltForest) SetMode(m mode.Mode) error { +func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error { t.modeMtx.Lock() defer t.modeMtx.Unlock() @@ -100,14 +99,14 @@ func (t *boltForest) SetMode(m mode.Mode) error { return nil } - err := t.Close() + err := t.Close(ctx) if err == nil && !m.NoMetabase() { if err = t.openBolt(m); err == nil { - err = t.Init() + err = t.Init(ctx) } } if err != nil { - return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err) + return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err) } t.mode = m @@ -129,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { readOnly := m.ReadOnly() err := util.MkdirAllX(filepath.Dir(t.path), t.perm) if err != nil { - return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err)) + return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err)) } opts := *bbolt.DefaultOptions @@ -140,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { t.db, err = bbolt.Open(t.path, t.perm, &opts) if err != nil { - return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err)) + return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err)) } t.db.MaxBatchSize = t.maxBatchSize @@ -149,7 +148,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { return nil } -func (t *boltForest) Init() error { +func (t *boltForest) Init(context.Context) error { if t.mode.NoMetabase() || t.db.IsReadOnly() { return nil } @@ -163,7 +162,7 @@ func (t *boltForest) Init() error { }) } -func (t *boltForest) Close() error { +func (t *boltForest) Close(context.Context) error { var err error if t.db != nil { err = t.db.Close() @@ -420,10 +419,7 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri return err } - i, node, err := t.getPathPrefix(bTree, attr, path) - if err != nil { - return err - } + i, node := t.getPathPrefix(bTree, attr, path) ts := t.getLatestTimestamp(bLog, d.Position, d.Size) lm = make([]Move, len(path)-i+1) @@ -559,6 +555,80 @@ func (t *boltForest) TreeApply(ctx context.Context, cnr 
cidSDK.ID, treeID string return metaerr.Wrap(err) } +func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success) + }() + + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch", + trace.WithAttributes( + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + m, err := t.filterSeen(cnr, treeID, m) + if err != nil { + return err + } + if len(m) == 0 { + success = true + return nil + } + + ch := make(chan error) + b := &batch{ + forest: t, + cid: cnr, + treeID: treeID, + results: []chan<- error{ch}, + operations: m, + } + go func() { + b.run() + }() + err = <-ch + success = err == nil + return metaerr.Wrap(err) +} + +func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) { + t.modeMtx.RLock() + defer t.modeMtx.RUnlock() + + if t.mode.NoMetabase() { + return nil, ErrDegradedMode + } + + ops := make([]*Move, 0, len(m)) + err := t.db.View(func(tx *bbolt.Tx) error { + treeRoot := tx.Bucket(bucketName(cnr, treeID)) + if treeRoot == nil { + ops = m + return nil + } + b := treeRoot.Bucket(logBucket) + for _, op := range m { + var logKey [8]byte + binary.BigEndian.PutUint64(logKey[:], op.Time) + seen := b.Get(logKey[:]) != nil + if !seen { + ops = append(ops, op) + } + } + return nil + }) + if err != nil { + return nil, metaerr.Wrap(err) + } + return ops, nil +} + // TreeApplyStream should be used with caution: this method locks other write transactions while `source` is not closed. func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error { var ( @@ -705,7 +775,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M key, value = c.Prev() } - for i := 0; i < len(ms); i++ { + for i := range ms { // Loop invariant: key represents the next stored timestamp after ms[i].Time. // 2. Insert the operation. @@ -907,10 +977,7 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st b := treeRoot.Bucket(dataBucket) - i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) - if err != nil { - return err - } + i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) if i < len(path)-1 { return nil } @@ -1010,7 +1077,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol } // TreeSortedByFilename implements the Forest interface. 
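// Hedged usage sketch of the cursor-based paging introduced here (process is a hypothetical
// callback; forest, cnr and treeID are assumed to be in scope, the rest is from this patch):
//
//	var last *Cursor // nil means "start from the very beginning"
//	for {
//		page, next, err := forest.TreeSortedByFilename(ctx, cnr, treeID, MultiNode{RootID}, last, 100)
//		if err != nil || len(page) == 0 {
//			break
//		}
//		process(page)
//		// The cursor stores (filename, last visited node), so iteration can resume in the
//		// middle of a run of nodes that all share the same filename (see fixedHeap.push).
//		last = next
//	}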
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) { +func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { var ( startedAt = time.Now() success = false @@ -1088,19 +1155,24 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } if len(res) != 0 { s := string(findAttr(res[len(res)-1].Meta, AttributeFilename)) - last = &s + last = NewCursor(s, res[len(res)-1].LastChild()) } return res, last, metaerr.Wrap(err) } -func sortAndCut(result []NodeInfo, last *string) []NodeInfo { +func sortByFilename(nodes []NodeInfo) { + slices.SortFunc(nodes, func(a, b NodeInfo) int { + return bytes.Compare(a.Meta.GetAttr(AttributeFilename), b.Meta.GetAttr(AttributeFilename)) + }) +} + +func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo { var lastBytes []byte if last != nil { - lastBytes = []byte(*last) + lastBytes = []byte(last.GetFilename()) } - sort.Slice(result, func(i, j int) bool { - return bytes.Compare(result[i].Meta.GetAttr(AttributeFilename), result[j].Meta.GetAttr(AttributeFilename)) == -1 - }) + sortByFilename(result) + for i := range result { if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 { return result[i:] @@ -1161,7 +1233,8 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f lastFilename = nil nodes = nil length = actualLength + 1 - c.Seek(append(prefix, byte(length), byte(length>>8))) + count = 0 + c.Seek(binary.LittleEndian.AppendUint16(prefix, length)) c.Prev() // c.Next() will be performed by for loop } } @@ -1281,7 +1354,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err return nil }) if err != nil { - return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err)) + return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err)) } success = true return ids, nil @@ -1425,7 +1498,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* var contID cidSDK.ID if err := contID.Decode(k[:32]); err != nil { - return fmt.Errorf("failed to decode containerID: %w", err) + return fmt.Errorf("decode container ID: %w", err) } res.Items = append(res.Items, ContainerIDTreeID{ CID: contID, @@ -1433,8 +1506,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* }) if len(res.Items) == batchSize { - res.NextPageToken = make([]byte, len(k)) - copy(res.NextPageToken, k) + res.NextPageToken = bytes.Clone(k) break } } @@ -1447,7 +1519,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* return &res, nil } -func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) { +func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) { c := bTree.Cursor() var curNodes []Node @@ -1470,14 +1542,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin } if len(nextNodes) == 0 { - return i, curNodes, nil + return i, curNodes } } - return len(path), nextNodes, nil + return len(path), nextNodes } -func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) { +func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) { c := 
bTree.Cursor() var curNode Node @@ -1497,10 +1569,10 @@ loop: childKey, value = c.Next() } - return i, curNode, nil + return i, curNode } - return len(path), curNode, nil + return len(path), curNode } func (t *boltForest) moveFromBytes(m *Move, data []byte) error { @@ -1510,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error { func (t *boltForest) logFromBytes(lm *Move, data []byte) error { lm.Child = binary.LittleEndian.Uint64(data) lm.Parent = binary.LittleEndian.Uint64(data[8:]) - return lm.Meta.FromBytes(data[16:]) + return lm.FromBytes(data[16:]) } func (t *boltForest) logToBytes(lm *Move) []byte { w := io.NewBufBinWriter() - size := 8 + 8 + lm.Meta.Size() + 1 + size := 8 + 8 + lm.Size() + 1 // if lm.HasOld { // size += 8 + lm.Old.Meta.Size() // } @@ -1523,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte { w.Grow(size) w.WriteU64LE(lm.Child) w.WriteU64LE(lm.Parent) - lm.Meta.EncodeBinary(w.BinWriter) + lm.EncodeBinary(w.BinWriter) // w.WriteBool(lm.HasOld) // if lm.HasOld { // w.WriteU64LE(lm.Old.Parent) @@ -1585,7 +1657,7 @@ func internalKeyPrefix(key []byte, k string) []byte { key = append(key, 'i') l := len(k) - key = append(key, byte(l), byte(l>>8)) + key = binary.LittleEndian.AppendUint16(key, uint16(l)) key = append(key, k...) return key } @@ -1600,14 +1672,10 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte { key = internalKeyPrefix(key, k) l := len(v) - key = append(key, byte(l), byte(l>>8)) + key = binary.LittleEndian.AppendUint16(key, uint16(l)) key = append(key, v...) - var raw [8]byte - binary.LittleEndian.PutUint64(raw[:], parent) - key = append(key, raw[:]...) - - binary.LittleEndian.PutUint64(raw[:], node) - key = append(key, raw[:]...) + key = binary.LittleEndian.AppendUint64(key, parent) + key = binary.LittleEndian.AppendUint64(key, node) return key } diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index 78503bada..ebfd0bcc0 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -1,10 +1,10 @@ package pilorama import ( - "bytes" "context" "errors" "fmt" + "slices" "sort" "strings" @@ -85,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID s.operations = append(s.operations, op) } - mCopy := make([]KeyValue, len(m)) - copy(mCopy, m) + mCopy := slices.Clone(m) op := s.do(&Move{ Parent: node, Meta: Meta{ @@ -112,7 +111,16 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o return s.Apply(op) } -func (f *memoryForest) Init() error { +func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error { + for _, op := range ops { + if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil { + return err + } + } + return nil +} + +func (f *memoryForest) Init(context.Context) error { return nil } @@ -120,11 +128,11 @@ func (f *memoryForest) Open(context.Context, mode.Mode) error { return nil } -func (f *memoryForest) SetMode(mode.Mode) error { +func (f *memoryForest) SetMode(context.Context, mode.Mode) error { return nil } -func (f *memoryForest) Close() error { +func (f *memoryForest) Close(context.Context) error { return nil } func (f *memoryForest) SetParentID(string) {} @@ -156,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, } // TreeSortedByFilename implements the Forest interface. 
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) { +func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { @@ -169,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI var res []NodeInfo for _, nodeID := range nodeIDs { - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) for _, childID := range children { var found bool for _, kv := range s.infoMap[childID].Meta.Items { @@ -192,23 +200,18 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI return nil, start, nil } - sort.Slice(res, func(i, j int) bool { - return bytes.Compare(res[i].Meta.GetAttr(AttributeFilename), res[j].Meta.GetAttr(AttributeFilename)) == -1 - }) + sortByFilename(res) r := mergeNodeInfos(res) for i := range r { - if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start { - finish := i + count - if len(res) < finish { - finish = len(res) - } + if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() { + finish := min(len(res), i+count) last := string(findAttr(r[finish-1].Meta, AttributeFilename)) - return r[i:finish], &last, nil + return r[i:finish], NewCursor(last, 0), nil } } last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename)) - return nil, &last, nil + return nil, NewCursor(last, 0), nil } // TreeGetChildren implements the Forest interface. @@ -219,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str return nil, ErrTreeNotFound } - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) res := make([]NodeInfo, 0, len(children)) for _, childID := range children { res = append(res, NodeInfo{ diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index 001d095c8..844084c55 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -1,11 +1,13 @@ package pilorama import ( + "bytes" "context" "crypto/rand" "fmt" mrand "math/rand" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -28,7 +30,7 @@ var providers = []struct { {"inmemory", func(t testing.TB, _ ...Option) ForestStorage { f := NewMemoryForest() require.NoError(t, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) return f }}, {"bbolt", func(t testing.TB, opts ...Option) ForestStorage { @@ -38,7 +40,7 @@ var providers = []struct { WithMaxBatchSize(1), }, opts...)...) 
require.NoError(t, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) return f }}, } @@ -59,7 +61,7 @@ func TestForest_TreeMove(t *testing.T) { } func testForestTreeMove(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -123,7 +125,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) { } func testForestTreeGetChildren(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -192,7 +194,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { const total = 100_000 d := CIDDescriptor{cnr, 0, 1} - for i := 0; i < total; i++ { + for i := range total { u, err := uuid.NewRandom() if err != nil { b.FailNow() } @@ -214,7 +216,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { } b.Run(providers[i].name+",root", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100) if err != nil || len(res) != 100 { b.Fatalf("err %v, count %d", err, len(res)) @@ -222,7 +224,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { } }) b.Run(providers[i].name+",leaf", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100) if err != nil || len(res) != 0 { b.FailNow() @@ -232,6 +234,65 @@ func BenchmarkForestSortedIteration(b *testing.B) { } } +// The issue we call "BugWithSkip" is easiest to understand when the filenames are +// monotonically increasing numbers: the sorted listing then interleaves filenames of different lengths. +// The bug occurred when the listing switched between filename lengths. +// Hence this test uses the numbers 1 to 2000 and a batch size of 10. +func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) { + for i := range providers { + t.Run(providers[i].name, func(t *testing.T) { + testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t)) + }) + } +} + +func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) { + defer func() { require.NoError(t, s.Close(context.Background())) }() + + cid := cidtest.ID() + d := CIDDescriptor{cid, 0, 1} + treeID := "version" + treeAdd := func(t *testing.T, ts int, filename string) { + _, err := s.TreeMove(context.Background(), d, treeID, &Move{ + Child: RootID + uint64(ts), + Parent: RootID, + Meta: Meta{ + Time: Timestamp(ts), + Items: []KeyValue{ + {Key: AttributeFilename, Value: []byte(filename)}, + }, + }, + }) + require.NoError(t, err) + } + + const count = 2000 + treeAdd(t, 1, "") + for i := 1; i < count; i++ { + treeAdd(t, i+1, strconv.Itoa(i+1)) + } + + var result []MultiNodeInfo + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { + res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) + require.NoError(t, err) + result = append(result, res...)
+ return cursor + } + + const batchSize = 10 + last := treeAppend(t, nil, batchSize) + for i := 1; i < count/batchSize; i++ { + last = treeAppend(t, last, batchSize) + } + require.Len(t, result, count) + require.True(t, slices.IsSortedFunc(result, func(a, b MultiNodeInfo) int { + filenameA := findAttr(a.Meta, AttributeFilename) + filenameB := findAttr(b.Meta, AttributeFilename) + return bytes.Compare(filenameA, filenameB) + })) +} + func TestForest_TreeSortedIteration(t *testing.T) { for i := range providers { t.Run(providers[i].name, func(t *testing.T) { @@ -241,7 +302,7 @@ func TestForest_TreeSortedIteration(t *testing.T) { } func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -267,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *string, count int) *string { + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) @@ -300,7 +361,7 @@ func TestForest_TreeSortedFilename(t *testing.T) { } func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() const controlAttr = "control_attr" cid := cidtest.ID() @@ -392,7 +453,7 @@ func TestForest_TreeDrop(t *testing.T) { } func testForestTreeDrop(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() const cidsSize = 3 var cids [cidsSize]cidSDK.ID @@ -462,7 +523,7 @@ func TestForest_TreeAdd(t *testing.T) { } func testForestTreeAdd(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -510,7 +571,7 @@ func TestForest_TreeAddByPath(t *testing.T) { } func testForestTreeAddByPath(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -648,7 +709,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio t.Run("add a child, then insert a parent removal", func(t *testing.T) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}}) @@ -661,7 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio }) t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}} testApply(t, s, 11, 10, meta) @@ -731,7 +792,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ t.Run("expected", func(t *testing.T) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { 
require.NoError(t, s.Close(context.Background())) }() for i := range logs { require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false)) @@ -740,10 +801,10 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ }) s := constructor(t, WithMaxBatchSize(batchSize)) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false)) - for i := 0; i < batchSize; i++ { + for range batchSize { errG.Go(func() error { return s.TreeApply(ctx, cid, treeID, &logs[2], false) }) @@ -781,7 +842,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op } s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() t.Run("empty log, no panic", func(t *testing.T) { _, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0) @@ -822,7 +883,7 @@ func TestForest_TreeExists(t *testing.T) { func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) { actual, err := s.TreeExists(context.Background(), cid, treeID) @@ -881,7 +942,7 @@ func TestApplyTricky1(t *testing.T) { for i := range providers { t.Run(providers[i].name, func(t *testing.T) { s := providers[i].construct(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() for i := range ops { require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false)) @@ -944,7 +1005,7 @@ func TestApplyTricky2(t *testing.T) { for i := range providers { t.Run(providers[i].name, func(t *testing.T) { s := providers[i].construct(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() for i := range ops { require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false)) @@ -982,7 +1043,7 @@ func TestForest_ParallelApply(t *testing.T) { // The operations are guaranteed to be applied and returned sorted by `Time`. 
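// The shuffle-based tests below rely on the move-tree CRDT property: applying the same set
// of operations in any order converges to the same forest. The check, condensed from
// testForestTreeParallelApply/testForestTreeApplyRandom in this file (names as used there):
//
//	ops := prepareRandomTree(nodeCount, opCount)
//	for i := range ops { _ = expected.TreeApply(ctx, cid, treeID, &ops[i], false) }
//	r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
//	for i := range ops { _ = actual.TreeApply(ctx, cid, treeID, &ops[i], false) }
//	compareForests(t, expected, actual, cid, treeID, nodeCount)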
func prepareRandomTree(nodeCount, opCount int) []Move { ops := make([]Move, nodeCount+opCount) - for i := 0; i < nodeCount; i++ { + for i := range nodeCount { ops[i] = Move{ Parent: 0, Meta: Meta{ @@ -1020,7 +1081,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move { } func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) { - for i := uint64(0); i < uint64(nodeCount); i++ { + for i := range uint64(nodeCount) { expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i) require.NoError(t, err) actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i) @@ -1054,20 +1115,20 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ treeID := "version" expected := constructor(t, WithNoSync(true)) - defer func() { require.NoError(t, expected.Close()) }() + defer func() { require.NoError(t, expected.Close(context.Background())) }() for i := range ops { require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } - for i := 0; i < iterCount; i++ { + for range iterCount { // Shuffle random operations, leave initialization in place. r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true)) wg := new(sync.WaitGroup) ch := make(chan *Move) - for i := 0; i < batchSize; i++ { + for range batchSize { wg.Add(1) go func() { defer wg.Done() @@ -1084,7 +1145,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ wg.Wait() compareForests(t, expected, actual, cid, treeID, nodeCount) - require.NoError(t, actual.Close()) + require.NoError(t, actual.Close(context.Background())) } } @@ -1102,14 +1163,14 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. treeID := "version" expected := constructor(t, WithNoSync(true)) - defer func() { require.NoError(t, expected.Close()) }() + defer func() { require.NoError(t, expected.Close(context.Background())) }() for i := range ops { require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } const iterCount = 200 - for i := 0; i < iterCount; i++ { + for range iterCount { // Shuffle random operations, leave initialization in place. r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) @@ -1118,7 +1179,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. 
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } compareForests(t, expected, actual, cid, treeID, nodeCount) - require.NoError(t, actual.Close()) + require.NoError(t, actual.Close(context.Background())) } } @@ -1136,7 +1197,7 @@ func BenchmarkApplySequential(b *testing.B) { b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) { r := mrand.New(mrand.NewSource(time.Now().Unix())) s := providers[i].construct(b, WithMaxBatchSize(bs)) - defer func() { require.NoError(b, s.Close()) }() + defer func() { require.NoError(b, s.Close(context.Background())) }() benchmarkApply(b, s, func(opCount int) []Move { ops := make([]Move, opCount) @@ -1172,7 +1233,7 @@ func BenchmarkApplyReorderLast(b *testing.B) { b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) { r := mrand.New(mrand.NewSource(time.Now().Unix())) s := providers[i].construct(b, WithMaxBatchSize(bs)) - defer func() { require.NoError(b, s.Close()) }() + defer func() { require.NoError(b, s.Close(context.Background())) }() benchmarkApply(b, s, func(opCount int) []Move { ops := make([]Move, opCount) @@ -1186,7 +1247,7 @@ func BenchmarkApplyReorderLast(b *testing.B) { Child: uint64(r.Intn(benchNodeCount)), } if i != 0 && i%blockSize == 0 { - for j := 0; j < blockSize/2; j++ { + for j := range blockSize / 2 { ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j] } } @@ -1204,7 +1265,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) { cid := cidtest.ID() treeID := "version" ch := make(chan int, b.N) - for i := 0; i < b.N; i++ { + for i := range b.N { ch <- i } @@ -1229,7 +1290,7 @@ func TestTreeGetByPath(t *testing.T) { } func testTreeGetByPath(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() treeID := "version" @@ -1250,7 +1311,7 @@ func testTreeGetByPath(t *testing.T, s ForestStorage) { if mf, ok := s.(*memoryForest); ok { single := mf.treeMap[cid.String()+"/"+treeID] t.Run("test meta", func(t *testing.T) { - for i := 0; i < 6; i++ { + for i := range 6 { require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time) } }) @@ -1308,7 +1369,7 @@ func TestGetTrees(t *testing.T) { } func testTreeGetTrees(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()} d := CIDDescriptor{Position: 0, Size: 1} @@ -1354,7 +1415,7 @@ func TestTreeLastSyncHeight(t *testing.T) { } func testTreeLastSyncHeight(t *testing.T, f ForestStorage) { - defer func() { require.NoError(t, f.Close()) }() + defer func() { require.NoError(t, f.Close(context.Background())) }() cnr := cidtest.ID() treeID := "someTree" @@ -1398,7 +1459,6 @@ func testTreeLastSyncHeight(t *testing.T, f ForestStorage) { func TestForest_ListTrees(t *testing.T) { for i := range providers { - i := i t.Run(providers[i].name, func(t *testing.T) { testTreeListTrees(t, providers[i].construct) }) @@ -1432,7 +1492,7 @@ func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Op var expected []ContainerIDTreeID treeIDs := []string{"version", "system", "s", "avada kedavra"} - for i := 0; i < count; i++ { + for i := range count { cid := cidtest.ID() treeID := treeIDs[i%len(treeIDs)] expected = append(expected, ContainerIDTreeID{ diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go index 
ec57b9e1f..b035be1e1 100644 --- a/pkg/local_object_storage/pilorama/heap.go +++ b/pkg/local_object_storage/pilorama/heap.go @@ -2,6 +2,8 @@ package pilorama import ( "container/heap" + "slices" + "strings" ) type heapInfo struct { @@ -28,12 +30,13 @@ func (h *filenameHeap) Pop() any { // fixedHeap maintains a fixed number of the smallest elements, starting at some point. type fixedHeap struct { - start *string - count int - h *filenameHeap + start *Cursor + sorted bool + count int + h *filenameHeap } -func newHeap(start *string, count int) *fixedHeap { +func newHeap(start *Cursor, count int) *fixedHeap { h := new(filenameHeap) heap.Init(h) @@ -44,20 +47,50 @@ } } +const amortizationMultiplier = 5 + func (h *fixedHeap) push(id MultiNode, filename string) bool { - if h.start != nil && filename <= *h.start { - return false + if h.start != nil { + if filename < h.start.GetFilename() { + return false + } else if filename == h.start.GetFilename() { + // A tree may have a lot of nodes with the same filename but different versions, so that + // len(nodes) > batch_size. The cut nodes should be pushed into the result on a repeated call + // with the same filename. + pos := slices.Index(id, h.start.GetNode()) + if pos == -1 || pos+1 >= len(id) { + return false + } + id = id[pos+1:] + } } - heap.Push(h.h, heapInfo{id: id, filename: filename}) - if h.h.Len() > h.count { - heap.Remove(h.h, h.h.Len()-1) + + *h.h = append(*h.h, heapInfo{id: id, filename: filename}) + h.sorted = false + + if h.h.Len() > h.count*amortizationMultiplier { + slices.SortFunc(*h.h, func(a, b heapInfo) int { + return strings.Compare(a.filename, b.filename) + }) + *h.h = (*h.h)[:h.count] } return true } func (h *fixedHeap) pop() (heapInfo, bool) { - if h.h.Len() != 0 { - return heap.Pop(h.h).(heapInfo), true + if !h.sorted { + slices.SortFunc(*h.h, func(a, b heapInfo) int { + return strings.Compare(a.filename, b.filename) + }) + if len(*h.h) > h.count { + *h.h = (*h.h)[:h.count] + } + h.sorted = true + } + if len(*h.h) != 0 { + info := (*h.h)[0] + *h.h = (*h.h)[1:] + return info, true } return heapInfo{}, false } diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index c9f5df3b7..28b7faec8 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -1,6 +1,9 @@ package pilorama -import "sort" +import ( + "cmp" + "slices" +) // nodeInfo couples parent and metadata. type nodeInfo struct { @@ -32,9 +35,9 @@ func newMemoryTree() *memoryTree { // undo undoes op and changes s in-place.
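// Its presumed role in the bigger picture (hedged; the helper names here are illustrative,
// not package API): when an operation with an older logical time arrives, newer log entries
// are first undone, the late operation is applied, and the undone tail is replayed:
//
//	for hasNewerOps(op.Time) { undo(popNewest()) } // rewind past entries with Time > op.Time
//	apply(op)                                      // insert the late operation in order
//	replayRewoundTail()                            // re-apply what was undone
//
// which is why do() records the HasOld/Old snapshots that undo consumes.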
func (s *memoryTree) undo(op *move) { if op.HasOld { - s.tree.infoMap[op.Child] = op.Old + s.infoMap[op.Child] = op.Old } else { - delete(s.tree.infoMap, op.Child) + delete(s.infoMap, op.Child) } } @@ -80,8 +83,8 @@ func (s *memoryTree) do(op *Move) move { }, } - shouldPut := !s.tree.isAncestor(op.Child, op.Parent) - p, ok := s.tree.infoMap[op.Child] + shouldPut := !s.isAncestor(op.Child, op.Parent) + p, ok := s.infoMap[op.Child] if ok { lm.HasOld = true lm.Old = p @@ -97,7 +100,7 @@ func (s *memoryTree) do(op *Move) move { p.Meta = m p.Parent = op.Parent - s.tree.infoMap[op.Child] = p + s.infoMap[op.Child] = p return lm } @@ -131,10 +134,10 @@ func (t tree) getChildren(parent Node) []Node { } } - sort.Slice(children, func(i, j int) bool { - a := t.infoMap[children[i]] - b := t.infoMap[children[j]] - return a.Meta.Time < b.Meta.Time + slices.SortFunc(children, func(ci, cj uint64) int { + a := t.infoMap[ci] + b := t.infoMap[cj] + return cmp.Compare(a.Meta.Time, b.Meta.Time) }) return children } @@ -189,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { } var nodes []Node - var lastTs Timestamp + var lastTS Timestamp children := t.getChildren(curNode) for i := range children { @@ -197,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { fileName := string(info.Meta.GetAttr(attr)) if fileName == path[len(path)-1] { if latest { - if info.Meta.Time >= lastTs { + if info.Meta.Time >= lastTS { nodes = append(nodes[:0], children[i]) } } else { diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index 61a3849bf..e1f6cd8e7 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -21,6 +21,8 @@ type Forest interface { // TreeApply applies a replicated operation from another node. // If background is true, TreeApply will first check whether an operation exists. TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error + // TreeApplyBatch applies replicated operations from another node. + TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error // TreeGetByPath returns all nodes corresponding to the path. // The path is constructed by descending from the root using the values of the // AttributeFilename in meta. @@ -35,7 +37,7 @@ type Forest interface { TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) // TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute. // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree. - TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) + TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) // TreeGetOpLog returns the first log operation stored at or above the height. // In case no such operation is found, empty Move and nil error should be returned. TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) @@ -60,10 +62,10 @@ type Forest interface { type ForestStorage interface { // DumpInfo returns information about the pilorama.
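// (Two notes on this interface, sketched from the changes below: the lifecycle methods now
// thread a context.Context for cancellation and tracing, and TreeSortedByFilename pages with
// a nil-safe *Cursor, so the zero value means "start from the beginning":
//
//	var c *Cursor       // a nil cursor is valid
//	_ = c.GetFilename() // "" — the getters check for a nil receiver
//	_ = c.GetNode()     // Node(0)
//
// See the Cursor type further down in this file.)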
DumpInfo() Info - Init() error + Init(context.Context) error Open(context.Context, mode.Mode) error - Close() error - SetMode(m mode.Mode) error + Close(context.Context) error + SetMode(context.Context, mode.Mode) error SetParentID(id string) Forest @@ -77,6 +79,38 @@ const ( AttributeVersion = "Version" ) +// Cursor keeps state between function calls for traversing nodes. +// It stores the attributes associated with a previous call, allowing subsequent operations +// to resume traversal from this point rather than starting from the beginning. +type Cursor struct { + // Last traversed filename. + filename string + + // Last traversed node. + node Node +} + +func NewCursor(filename string, node Node) *Cursor { + return &Cursor{ + filename: filename, + node: node, + } +} + +func (c *Cursor) GetFilename() string { + if c == nil { + return "" + } + return c.filename +} + +func (c *Cursor) GetNode() Node { + if c == nil { + return Node(0) + } + return c.node +} + // CIDDescriptor contains container ID and information about the node position // in the list of container nodes. type CIDDescriptor struct { diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go index 01d3da9f0..0c042aa56 100644 --- a/pkg/local_object_storage/pilorama/mode_test.go +++ b/pkg/local_object_storage/pilorama/mode_test.go @@ -19,13 +19,13 @@ func Test_Mode(t *testing.T) { require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly)) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Close()) + require.NoError(t, f.Close(context.Background())) require.NoError(t, f.Open(context.Background(), mode.Degraded)) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Close()) + require.NoError(t, f.Close(context.Background())) } diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go index 106ba6ae9..36d347f10 100644 --- a/pkg/local_object_storage/pilorama/multinode.go +++ b/pkg/local_object_storage/pilorama/multinode.go @@ -25,6 +25,10 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool { return true } +func (r *MultiNodeInfo) LastChild() Node { + return r.Children[len(r.Children)-1] +} + func (n NodeInfo) ToMultiNode() MultiNodeInfo { return MultiNodeInfo{ Children: MultiNode{n.ID}, diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go index 54c2b90a6..eecee1527 100644 --- a/pkg/local_object_storage/pilorama/split_test.go +++ b/pkg/local_object_storage/pilorama/split_test.go @@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) { require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4")) require.Equal(t, []byte{10}, testGetByPath(t, "value0")) - testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) { + testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) { res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize) require.NoError(t, err) return res, last diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go index 364649b50..b4015ae8d 100644 --- 
a/pkg/local_object_storage/shard/container.go +++ b/pkg/local_object_storage/shard/container.go @@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 { return r.size } -func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { +func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { s.m.RLock() defer s.m.RUnlock() @@ -34,9 +34,15 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { return ContainerSizeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ContainerSizeRes{}, err + } + defer release() + size, err := s.metaBase.ContainerSize(prm.cnr) if err != nil { - return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err) + return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err) } return ContainerSizeRes{ @@ -69,9 +75,15 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont return ContainerCountRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ContainerCountRes{}, err + } + defer release() + counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID) if err != nil { - return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err) + return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err) } return ContainerCountRes{ @@ -100,6 +112,12 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerSize(ctx, id) } @@ -122,5 +140,11 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerCount(ctx, id) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 90d7afdd4..d489b8b0d 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -19,25 +20,25 @@ import ( "golang.org/x/sync/errgroup" ) -func (s *Shard) handleMetabaseFailure(stage string, err error) error { - s.log.Error(logs.ShardMetabaseFailureSwitchingMode, +func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error { + s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode, zap.String("stage", stage), zap.Stringer("mode", mode.ReadOnly), zap.Error(err)) - err = s.SetMode(mode.ReadOnly) + err = s.SetMode(ctx, mode.ReadOnly) if err == nil { return nil } - s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode, + s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode, zap.String("stage", stage), zap.Stringer("mode", mode.DegradedReadOnly), zap.Error(err)) - err = s.SetMode(mode.DegradedReadOnly) + err = s.SetMode(ctx, mode.DegradedReadOnly) if err != nil { - return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly)) + return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly) } return nil } @@ -71,10 +72,10 @@ func (s *Shard) Open(ctx context.Context) error { for j := i + 1; j < len(components); j++ { if err := components[j].Open(ctx, m); err != nil { // Other components must be opened, fail. 
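// Condensed restatement of the degradation ladder implemented by handleMetabaseFailure above
// (illustrative, error handling elided): if only the metabase fails, the shard downgrades
// instead of refusing to start:
//
//	if err := s.SetMode(ctx, mode.ReadOnly); err != nil {
//		if err := s.SetMode(ctx, mode.DegradedReadOnly); err != nil {
//			return err // no usable fallback mode remains
//		}
//	}
//
// Every other component still has to open successfully first, hence this inner loop.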
- return fmt.Errorf("could not open %T: %w", components[j], err) + return fmt.Errorf("open %T: %w", components[j], err) } } - err = s.handleMetabaseFailure("open", err) + err = s.handleMetabaseFailure(ctx, "open", err) if err != nil { return err } @@ -82,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error { break } - return fmt.Errorf("could not open %T: %w", component, err) + return fmt.Errorf("open %T: %w", component, err) } } return nil @@ -90,8 +91,8 @@ func (s *Shard) Open(ctx context.Context) error { type metabaseSynchronizer Shard -func (x *metabaseSynchronizer) Init() error { - ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init") +func (x *metabaseSynchronizer) Init(ctx context.Context) error { + ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init") defer span.End() return (*Shard)(x).refillMetabase(ctx) @@ -99,12 +100,48 @@ func (x *metabaseSynchronizer) Init() error { // Init initializes all Shard's components. func (s *Shard) Init(ctx context.Context) error { + m := s.GetMode() + if err := s.initializeComponents(ctx, m); err != nil { + return err + } + + s.updateMetrics(ctx) + + s.gc = &gc{ + gcCfg: &s.gcCfg, + remover: s.removeGarbage, + stopChannel: make(chan struct{}), + newEpochChan: make(chan uint64), + newEpochHandlers: &newEpochHandlers{ + cancelFunc: func() {}, + handlers: []newEpochHandler{ + s.collectExpiredLocks, + s.collectExpiredObjects, + s.collectExpiredTombstones, + s.collectExpiredMetrics, + }, + }, + } + if s.gc.metrics != nil { + s.gc.metrics.SetShardID(s.info.ID.String()) + } + + s.gc.init(ctx) + + s.rb = newRebuilder() + if !m.NoMetabase() { + s.rb.Start(ctx, s.blobStor, s.metaBase, s.log) + } + s.writecacheSealCancel.Store(dummyCancel) + return nil +} + +func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error { type initializer interface { - Init() error + Init(context.Context) error } var components []initializer - m := s.GetMode() if !m.NoMetabase() { var initMetabase initializer @@ -131,13 +168,13 @@ func (s *Shard) Init(ctx context.Context) error { } for _, component := range components { - if err := component.Init(); err != nil { + if err := component.Init(ctx); err != nil { if component == s.metaBase { - if errors.Is(err, meta.ErrOutdatedVersion) { + if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) { return fmt.Errorf("metabase initialization: %w", err) } - err = s.handleMetabaseFailure("init", err) + err = s.handleMetabaseFailure(ctx, "init", err) if err != nil { return err } @@ -145,39 +182,9 @@ func (s *Shard) Init(ctx context.Context) error { break } - return fmt.Errorf("could not initialize %T: %w", component, err) + return fmt.Errorf("initialize %T: %w", component, err) } } - - s.updateMetrics(ctx) - - s.gc = &gc{ - gcCfg: &s.gcCfg, - remover: s.removeGarbage, - stopChannel: make(chan struct{}), - eventChan: make(chan Event), - mEventHandler: map[eventType]*eventHandlers{ - eventNewEpoch: { - cancelFunc: func() {}, - handlers: []eventHandler{ - s.collectExpiredLocks, - s.collectExpiredObjects, - s.collectExpiredTombstones, - s.collectExpiredMetrics, - }, - }, - }, - } - if s.gc.metrics != nil { - s.gc.metrics.SetShardID(s.info.ID.String()) - } - - s.gc.init(ctx) - - s.rb = newRebuilder(s.rebuildLimiter) - if !m.NoMetabase() { - s.rb.Start(ctx, s.blobStor, s.metaBase, s.log) - } return nil } @@ -196,19 +203,19 @@ func (s *Shard) refillMetabase(ctx context.Context) error { err := s.metaBase.Reset() if err != nil { - return 
fmt.Errorf("could not reset metabase: %w", err) + return fmt.Errorf("reset metabase: %w", err) } withCount := true totalObjects, err := s.blobStor.ObjectsCount(ctx) if err != nil { - s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err)) + s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err)) withCount = false } eg, egCtx := errgroup.WithContext(ctx) - if s.cfg.refillMetabaseWorkersCount > 0 { - eg.SetLimit(s.cfg.refillMetabaseWorkersCount) + if s.refillMetabaseWorkersCount > 0 { + eg.SetLimit(s.refillMetabaseWorkersCount) } var completedCount uint64 @@ -245,12 +252,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error { err = errors.Join(egErr, itErr) if err != nil { - return fmt.Errorf("could not put objects to the meta: %w", err) + return fmt.Errorf("put objects to the meta: %w", err) } err = s.metaBase.SyncCounters() if err != nil { - return fmt.Errorf("could not sync object counters: %w", err) + return fmt.Errorf("sync object counters: %w", err) } success = true @@ -261,12 +268,27 @@ func (s *Shard) refillMetabase(ctx context.Context) error { func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error { obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - s.log.Warn(logs.ShardCouldNotUnmarshalObject, + s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject, zap.Stringer("address", addr), - zap.String("err", err.Error())) + zap.Error(err)) return nil } + hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) > 0 + + var isIndexedContainer bool + if hasIndexedAttribute { + info, err := s.containerInfo.Info(ctx, addr.Container()) + if err != nil { + return err + } + if info.Removed { + s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr)) + return nil + } + isIndexedContainer = info.Indexed + } + var err error switch obj.Type() { case objectSDK.TypeTombstone: @@ -282,6 +304,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, var mPrm meta.PutPrm mPrm.SetObject(obj) mPrm.SetStorageID(descriptor) + mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer) _, err = s.metaBase.Put(ctx, mPrm) if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) { @@ -293,7 +316,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error { var lock objectSDK.Lock if err := lock.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("could not unmarshal lock content: %w", err) + return fmt.Errorf("unmarshal lock content: %w", err) } locked := make([]oid.ID, lock.NumberOfMembers()) @@ -303,7 +326,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err id, _ := obj.ID() err := s.metaBase.Lock(ctx, cnr, id, locked) if err != nil { - return fmt.Errorf("could not lock objects: %w", err) + return fmt.Errorf("lock objects: %w", err) } return nil } @@ -312,7 +335,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object tombstone := objectSDK.NewTombstone() if err := tombstone.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("could not unmarshal tombstone content: %w", err) + return fmt.Errorf("unmarshal tombstone content: %w", err) } tombAddr := object.AddressOf(obj) @@ -333,23 +356,26 @@ func (s *Shard) 
refillTombstoneObject(ctx context.Context, obj *objectSDK.Object _, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { - return fmt.Errorf("could not inhume objects: %w", err) + return fmt.Errorf("inhume objects: %w", err) } return nil } // Close releases all Shard's components. -func (s *Shard) Close() error { +func (s *Shard) Close(ctx context.Context) error { + unlock := s.lockExclusive() if s.rb != nil { - s.rb.Stop(s.log) + s.rb.Stop(ctx, s.log) } - components := []interface{ Close() error }{} + var components []interface{ Close(context.Context) error } if s.pilorama != nil { components = append(components, s.pilorama) } if s.hasWriteCache() { + prev := s.writecacheSealCancel.Swap(notInitializedCancel) + prev.cancel() // no need to wait: writecache.Seal and writecache.Close lock the same mutex components = append(components, s.writeCache) } @@ -357,15 +383,23 @@ func (s *Shard) Close() error { var lastErr error for _, component := range components { - if err := component.Close(); err != nil { + if err := component.Close(ctx); err != nil { lastErr = err - s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err)) + s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err)) } } + if s.opsLimiter != nil { + s.opsLimiter.Close() + } + + unlock() + + // GC waits for handlers and the remover to complete. Handlers may try to take the shard's lock. + // So, to prevent a deadlock, GC is stopped outside of the exclusive lock. // If Init/Open was unsuccessful gc can be nil. if s.gc != nil { - s.gc.stop() + s.gc.stop(ctx) } return lastErr @@ -387,18 +421,18 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { unlock := s.lockExclusive() defer unlock() - s.rb.Stop(s.log) + s.rb.Stop(ctx, s.log) if !s.info.Mode.NoMetabase() { defer func() { s.rb.Start(ctx, s.blobStor, s.metaBase, s.log) }() } - ok, err := s.metaBase.Reload(c.metaOpts...) + ok, err := s.metaBase.Reload(ctx, c.metaOpts...) if err != nil { if errors.Is(err, meta.ErrDegradedMode) { - s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) - _ = s.setMode(mode.DegradedReadOnly) + s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) + _ = s.setMode(ctx, mode.DegradedReadOnly) } return err } @@ -410,15 +444,19 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { // config after the node was updated.
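The two comments added to Close carry the design of this hunk; spelled out as a sequence (names as in the patch, an illustration rather than extra code):

// Close ordering sketch:
// 1. unlock := s.lockExclusive()  // cancels GC remover and writecache seal, takes s.m
// 2. component.Close(ctx)         // pilorama, writecache, blobstor, metabase
// 3. s.opsLimiter.Close()
// 4. unlock()                     // release s.m first...
// 5. s.gc.stop(ctx)               // ...because GC handlers may take s.m themselves,
//                                 // so stopping GC under the lock could deadlock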
err = s.refillMetabase(ctx) } else { - err = s.metaBase.Init() + err = s.metaBase.Init(ctx) } if err != nil { - s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) - _ = s.setMode(mode.DegradedReadOnly) + s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) + _ = s.setMode(ctx, mode.DegradedReadOnly) return err } } - return s.setMode(c.info.Mode) + if c.opsLimiter != nil { + s.opsLimiter.Close() + s.opsLimiter = c.opsLimiter + } + return s.setMode(ctx, c.info.Mode) } func (s *Shard) lockExclusive() func() { @@ -428,6 +466,9 @@ func (s *Shard) lockExclusive() func() { cancelGC := val.(context.CancelFunc) cancelGC() } + if c := s.writecacheSealCancel.Load(); c != nil { + c.cancel() + } s.m.Lock() s.setModeRequested.Store(false) return s.m.Unlock diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go index 44fee1636..6d2cd7137 100644 --- a/pkg/local_object_storage/shard/control_test.go +++ b/pkg/local_object_storage/shard/control_test.go @@ -86,7 +86,7 @@ func TestShardOpen(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) require.Equal(t, mode.ReadWrite, sh.GetMode()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) // Metabase can be opened in read-only => start in ReadOnly mode. allowedMode.Store(int64(os.O_RDONLY)) @@ -95,9 +95,9 @@ func TestShardOpen(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) require.Equal(t, mode.ReadOnly, sh.GetMode()) - require.Error(t, sh.SetMode(mode.ReadWrite)) + require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite)) require.Equal(t, mode.ReadOnly, sh.GetMode()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) // Metabase is corrupted => start in DegradedReadOnly mode. allowedMode.Store(math.MaxInt64) @@ -106,7 +106,7 @@ func TestShardOpen(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) require.Equal(t, mode.DegradedReadOnly, sh.GetMode()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) } func TestRefillMetabaseCorrupted(t *testing.T) { @@ -126,7 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) { }), } - mm := NewMetricStore() + mm := newMetricStore() sh := New( WithID(NewIDFromBytes([]byte{})), @@ -146,7 +146,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) { putPrm.SetObject(obj) _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) addr := object.AddressOf(obj) // This is copied from `fstree.treePath()` to avoid exporting function just for tests. 
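The opsLimiter calls threaded through the shard in this diff all follow one contract: acquire before touching the metabase, then release via the returned function (the concrete type in the patch is qos.ReleaseFunc, seen later in gc.go). A toy semaphore-backed limiter satisfying that contract; the real qos limiter is tag- and priority-aware, so treat this purely as a sketch:

package qosdemo

import "context"

// semLimiter is a stand-in for the shard's opsLimiter: acquire a slot or
// fail with the context's error; the returned func releases the slot.
type semLimiter struct{ sem chan struct{} }

func newSemLimiter(capacity int) *semLimiter {
	return &semLimiter{sem: make(chan struct{}, capacity)}
}

func (l *semLimiter) acquire(ctx context.Context) (func(), error) {
	select {
	case l.sem <- struct{}{}:
		return func() { <-l.sem }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// ReadRequest and WriteRequest share one pool in this sketch; the real
// limiter distinguishes IO tags and read/write classes.
func (l *semLimiter) ReadRequest(ctx context.Context) (func(), error)  { return l.acquire(ctx) }
func (l *semLimiter) WriteRequest(ctx context.Context) (func(), error) { return l.acquire(ctx) }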
@@ -170,7 +170,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) { getPrm.SetAddress(addr) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err)) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) } func TestRefillMetabase(t *testing.T) { @@ -190,7 +190,7 @@ func TestRefillMetabase(t *testing.T) { }), } - mm := NewMetricStore() + mm := newMetricStore() sh := New( WithID(NewIDFromBytes([]byte{})), @@ -216,7 +216,7 @@ func TestRefillMetabase(t *testing.T) { locked := make([]oid.ID, 1, 2) locked[0] = oidtest.ID() cnrLocked := cidtest.ID() - for i := uint64(0); i < objNum; i++ { + for range objNum { obj := objecttest.Object() obj.SetType(objectSDK.TypeRegular) @@ -358,7 +358,7 @@ func TestRefillMetabase(t *testing.T) { phyBefore := c.Phy logicalBefore := c.Logic - err = sh.Close() + err = sh.Close(context.Background()) require.NoError(t, err) sh = New( @@ -379,7 +379,7 @@ func TestRefillMetabase(t *testing.T) { // initialize Blobstor require.NoError(t, sh.Init(context.Background())) - defer sh.Close() + defer sh.Close(context.Background()) checkAllObjs(false) checkObj(object.AddressOf(tombObj), nil) diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go index b3bc6a30b..8dc1f0522 100644 --- a/pkg/local_object_storage/shard/count.go +++ b/pkg/local_object_storage/shard/count.go @@ -23,6 +23,12 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + cc, err := s.metaBase.ObjectCounters() if err != nil { return 0, err diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index c898fdf41..0101817a8 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -55,6 +54,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del return DeleteRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return DeleteRes{}, err + } + defer release() + result := DeleteRes{} for _, addr := range prm.addr { select { @@ -95,7 +100,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr } _, err := s.writeCache.Head(ctx, addr) if err == nil { - s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr)) + s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr)) return fmt.Errorf("object %s must be flushed from writecache", addr) } if client.IsErrObjectNotFound(err) { @@ -110,10 +115,9 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error res, err := s.metaBase.StorageID(ctx, sPrm) if err != nil { - s.log.Debug(logs.StorageIDRetrievalFailure, + s.log.Debug(ctx, logs.StorageIDRetrievalFailure, zap.Stringer("object", addr), - zap.String("error", err.Error()), - 
zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } storageID := res.StorageID() @@ -130,10 +134,9 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error _, err = s.blobStor.Delete(ctx, delPrm) if err != nil && !client.IsErrObjectNotFound(err) { - s.log.Debug(logs.ObjectRemovalFailureBlobStor, + s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor, zap.Stringer("object_address", addr), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } return nil diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go index 9f205fa5d..c9ce93bc5 100644 --- a/pkg/local_object_storage/shard/delete_test.go +++ b/pkg/local_object_storage/shard/delete_test.go @@ -3,7 +3,6 @@ package shard import ( "context" "testing" - "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" @@ -38,7 +37,7 @@ func TestShard_Delete_BigObject(t *testing.T) { func testShard(t *testing.T, hasWriteCache bool, payloadSize int) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() @@ -58,19 +57,14 @@ func testShard(t *testing.T, hasWriteCache bool, payloadSize int) { _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = sh.Get(context.Background(), getPrm) require.NoError(t, err) if hasWriteCache { - sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}) - require.Eventually(t, func() bool { - _, err = sh.Delete(context.Background(), delPrm) - return err == nil - }, 30*time.Second, 10*time.Millisecond) - } else { - _, err = sh.Delete(context.Background(), delPrm) - require.NoError(t, err) + require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false})) } + _, err = sh.Delete(context.Background(), delPrm) + require.NoError(t, err) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err)) diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index b5a9604b4..2c11b6b01 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ -5,7 +5,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -16,7 +18,7 @@ type ExistsPrm struct { // Exists option to set object checked for existence. Address oid.Address // Exists option to set parent object checked for existence. - ParentAddress oid.Address + ECParentAddress oid.Address } // ExistsRes groups the resulting values of Exists operation. 
@@ -51,16 +53,25 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { )) defer span.End() - var exists bool - var locked bool - var err error - s.m.RLock() defer s.m.RUnlock() if s.info.Mode.Disabled() { return ExistsRes{}, ErrShardDisabled - } else if s.info.Mode.NoMetabase() { + } else if s.info.EvacuationInProgress { + return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ExistsRes{}, err + } + defer release() + + var exists bool + var locked bool + + if s.info.Mode.NoMetabase() { var p common.ExistsPrm p.Address = prm.Address @@ -70,7 +81,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { } else { var existsPrm meta.ExistsPrm existsPrm.SetAddress(prm.Address) - existsPrm.SetParent(prm.ParentAddress) + existsPrm.SetECParent(prm.ECParentAddress) var res meta.ExistsRes res, err = s.metaBase.Exists(ctx, existsPrm) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index d605746e8..a262a52cb 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -6,11 +6,13 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -31,41 +33,14 @@ type TombstoneSource interface { IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool } -// Event represents class of external events. -type Event interface { - typ() eventType -} +type newEpochHandler func(context.Context, uint64) -type eventType int - -const ( - _ eventType = iota - eventNewEpoch -) - -type newEpoch struct { - epoch uint64 -} - -func (e newEpoch) typ() eventType { - return eventNewEpoch -} - -// EventNewEpoch returns new epoch event. -func EventNewEpoch(e uint64) Event { - return newEpoch{ - epoch: e, - } -} - -type eventHandler func(context.Context, Event) - -type eventHandlers struct { +type newEpochHandlers struct { prevGroup sync.WaitGroup cancelFunc context.CancelFunc - handlers []eventHandler + handlers []newEpochHandler } type gcRunResult struct { @@ -107,10 +82,10 @@ type gc struct { remover func(context.Context) gcRunResult - // eventChan is used only for listening for the new epoch event. + // newEpochChan is used only for listening for the new epoch event. // It is ok to keep opened, we are listening for context done when writing in it. 
- eventChan chan Event - mEventHandler map[eventType]*eventHandlers + newEpochChan chan uint64 + newEpochHandlers *newEpochHandlers } type gcCfg struct { @@ -131,7 +106,7 @@ type gcCfg struct { func defaultGCCfg() gcCfg { return gcCfg{ removerInterval: 10 * time.Second, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), workerPoolInit: func(int) util.WorkerPool { return nil }, @@ -140,16 +115,8 @@ func defaultGCCfg() gcCfg { } func (gc *gc) init(ctx context.Context) { - sz := 0 - - for _, v := range gc.mEventHandler { - sz += len(v.handlers) - } - - if sz > 0 { - gc.workerPool = gc.workerPoolInit(sz) - } - + gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers)) + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) gc.wg.Add(2) go gc.tickRemover(ctx) go gc.listenEvents(ctx) @@ -161,14 +128,14 @@ func (gc *gc) listenEvents(ctx context.Context) { for { select { case <-gc.stopChannel: - gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel) + gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel) return case <-ctx.Done(): - gc.log.Warn(logs.ShardStopEventListenerByContext) + gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) return - case event, ok := <-gc.eventChan: + case event, ok := <-gc.newEpochChan: if !ok { - gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel) + gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) return } @@ -177,43 +144,38 @@ func (gc *gc) listenEvents(ctx context.Context) { } } -func (gc *gc) handleEvent(ctx context.Context, event Event) { - v, ok := gc.mEventHandler[event.typ()] - if !ok { - return - } - - v.cancelFunc() - v.prevGroup.Wait() +func (gc *gc) handleEvent(ctx context.Context, epoch uint64) { + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() var runCtx context.Context - runCtx, v.cancelFunc = context.WithCancel(ctx) + runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx) - v.prevGroup.Add(len(v.handlers)) + gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers)) - for i := range v.handlers { + for i := range gc.newEpochHandlers.handlers { select { case <-ctx.Done(): return default: } - h := v.handlers[i] + h := gc.newEpochHandlers.handlers[i] err := gc.workerPool.Submit(func() { - defer v.prevGroup.Done() - h(runCtx, event) + defer gc.newEpochHandlers.prevGroup.Done() + h(runCtx, epoch) }) if err != nil { - gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool, - zap.String("error", err.Error()), + gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, + zap.Error(err), ) - v.prevGroup.Done() + gc.newEpochHandlers.prevGroup.Done() } } } -func (gc *gc) releaseResources() { +func (gc *gc) releaseResources(ctx context.Context) { if gc.workerPool != nil { gc.workerPool.Release() } @@ -222,7 +184,7 @@ func (gc *gc) releaseResources() { // because it is possible that we are close it earlier than stop writing. // It is ok to keep it opened. - gc.log.Debug(logs.ShardGCIsStopped) + gc.log.Debug(ctx, logs.ShardGCIsStopped) } func (gc *gc) tickRemover(ctx context.Context) { @@ -236,10 +198,10 @@ func (gc *gc) tickRemover(ctx context.Context) { case <-ctx.Done(): // Context canceled earlier than we start to close shards. // It make sense to stop collecting garbage by context too. 
- gc.releaseResources() + gc.releaseResources(ctx) return case <-gc.stopChannel: - gc.releaseResources() + gc.releaseResources(ctx) return case <-timer.C: startedAt := time.Now() @@ -258,13 +220,16 @@ func (gc *gc) tickRemover(ctx context.Context) { } } -func (gc *gc) stop() { +func (gc *gc) stop(ctx context.Context) { gc.onceStop.Do(func() { close(gc.stopChannel) }) - gc.log.Info(logs.ShardWaitingForGCWorkersToStop) + gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() + + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() } // iterates over metabase and deletes objects @@ -286,8 +251,47 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { return } - s.log.Debug(logs.ShardGCRemoveGarbageStarted) - defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted) + s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted) + defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted) + + buf, err := s.getGarbage(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, + zap.Error(err), + ) + + return + } else if len(buf) == 0 { + result.success = true + return + } + + var deletePrm DeletePrm + deletePrm.SetAddresses(buf...) + + // delete accumulated objects + res, err := s.delete(ctx, deletePrm, true) + + result.deleted = res.deleted + result.failedToDelete = uint64(len(buf)) - res.deleted + result.success = true + + if err != nil { + s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects, + zap.Error(err), + ) + result.success = false + } + + return +} + +func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() buf := make([]oid.Address, 0, s.rmBatchSize) @@ -308,47 +312,20 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { return nil }) - // iterate over metabase's objects with GC mark - // (no more than s.rmBatchSize objects) - err := s.metaBase.IterateOverGarbage(ctx, iterPrm) - if err != nil { - s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed, - zap.String("error", err.Error()), - ) - - return - } else if len(buf) == 0 { - result.success = true - return + if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil { + return nil, err } - var deletePrm DeletePrm - deletePrm.SetAddresses(buf...) 
- - // delete accumulated objects - res, err := s.delete(ctx, deletePrm, true) - - result.deleted = res.deleted - result.failedToDelete = uint64(len(buf)) - res.deleted - result.success = true - - if err != nil { - s.log.Warn(logs.ShardCouldNotDeleteTheObjects, - zap.String("error", err.Error()), - ) - result.success = false - } - - return + return buf, nil } func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { - workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount) - batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize) + workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount) + batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize) return } -func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { +func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -356,8 +333,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) }() - s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -366,7 +343,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock { batch = append(batch, o.Address()) @@ -396,7 +373,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err)) } } @@ -414,24 +391,25 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } + s.handleExpiredObjectsUnsafe(ctx, expired) +} + +func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) { + select { + case <-ctx.Done(): + return + default: + } + expired, err := s.getExpiredWithLinked(ctx, expired) if err != nil { - s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) + s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) return } - var inhumePrm meta.InhumePrm - - inhumePrm.SetAddresses(expired...) 
- inhumePrm.SetGCMark() - - // inhume the collected objects - res, err := s.metaBase.Inhume(ctx, inhumePrm) + res, err := s.inhumeGC(ctx, expired) if err != nil { - s.log.Warn(logs.ShardCouldNotInhumeTheObjects, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err)) return } @@ -449,6 +427,12 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) } func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + result := make([]oid.Address, 0, len(source)) parentToChildren, err := s.metaBase.GetChildren(ctx, source) if err != nil { @@ -462,7 +446,20 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) return result, nil } -func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { +func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) { + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return meta.InhumeRes{}, err + } + defer release() + + var inhumePrm meta.InhumePrm + inhumePrm.SetAddresses(addrs...) + inhumePrm.SetGCMark() + return s.metaBase.Inhume(ctx, inhumePrm) +} + +func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -470,11 +467,10 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone) }() - epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) - log.Debug(logs.ShardStartedExpiredTombstonesHandling) - defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling) + log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) + defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling) const tssDeleteBatch = 50 tss := make([]meta.TombstonedObject, 0, tssDeleteBatch) @@ -492,22 +488,29 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { }) for { - log.Debug(logs.ShardIteratingTombstones) + log.Debug(ctx, logs.ShardIteratingTombstones) s.m.RLock() if s.info.Mode.NoMetabase() { - s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones) + s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones) s.m.RUnlock() return } - err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + var release qos.ReleaseFunc + release, err = s.opsLimiter.ReadRequest(ctx) if err != nil { - log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) + log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) + s.m.RUnlock() + return + } + err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + release() + if err != nil { + log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() - return } @@ -524,7 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } - log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp))) + log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp))) if len(tssExp) > 0 { s.expiredTombstonesCallback(ctx, tssExp) } @@ -535,7 +538,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } -func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { +func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { var err 
error startedAt := time.Now() @@ -543,8 +546,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) }() - s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -554,14 +557,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { if o.Type() == objectSDK.TypeLock { batch = append(batch, o.Address()) if len(batch) == batchSize { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) batch = make([]oid.Address, 0, batchSize) @@ -575,7 +578,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { if len(batch) > 0 { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) } @@ -584,7 +587,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err)) } } @@ -596,7 +599,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo return ErrDegradedMode } - err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + + err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { select { case <-ctx.Done(): return meta.ErrInterruptIterator @@ -612,12 +621,11 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo } func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err } + defer release() return s.metaBase.FilterExpired(ctx, epoch, addresses) } @@ -627,28 +635,22 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid // // Does not modify tss. func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { return } - // Mark tombstones as garbage. - var pInhume meta.InhumePrm - - tsAddrs := make([]oid.Address, 0, len(tss)) - for _, ts := range tss { - tsAddrs = append(tsAddrs, ts.Tombstone()) - } - - pInhume.SetGCMark() - pInhume.SetAddresses(tsAddrs...) 
- - // inhume tombstones - res, err := s.metaBase.Inhume(ctx, pInhume) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.InhumeTombstones(ctx, tss) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) return } @@ -663,26 +665,27 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size)) i++ } - - // drop just processed expired tombstones - // from graveyard - err = s.metaBase.DropGraves(ctx, tss) - if err != nil { - s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err)) - } } // HandleExpiredLocks unlocks all objects which were locked by lockers. // If successful, marks lockers themselves as garbage. func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return + } + + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } unlocked, err := s.metaBase.FreeLockedBy(lockers) + release() if err != nil { - s.log.Warn(logs.ShardFailureToUnlockObjects, - zap.String("error", err.Error()), - ) + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } @@ -690,13 +693,15 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] var pInhume meta.InhumePrm pInhume.SetAddresses(lockers...) pInhume.SetForceGCMark() - - res, err := s.metaBase.Inhume(ctx, pInhume) + release, err = s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.Inhume(ctx, pInhume) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) return } @@ -718,7 +723,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) { expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked) if err != nil { - s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err)) + s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err)) return } @@ -726,47 +731,57 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc return } - s.handleExpiredObjects(ctx, expiredUnlocked) + s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked) } // HandleDeletedLocks unlocks all objects which were locked by lockers. 
-func (s *Shard) HandleDeletedLocks(lockers []oid.Address) { - if s.GetMode().NoMetabase() { +func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { return } - _, err := s.metaBase.FreeLockedBy(lockers) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(logs.ShardFailureToUnlockObjects, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + return + } + _, err = s.metaBase.FreeLockedBy(lockers) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } } -// NotificationChannel returns channel for shard events. -func (s *Shard) NotificationChannel() chan<- Event { - return s.gc.eventChan +// NotificationChannel returns channel for new epoch events. +func (s *Shard) NotificationChannel() chan<- uint64 { + return s.gc.newEpochChan } -func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { +func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics") defer span.End() - epoch := e.(newEpoch).epoch - - s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) s.collectExpiredContainerSizeMetrics(ctx, epoch) s.collectExpiredContainerCountMetrics(ctx, epoch) } func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) { - ids, err := s.metaBase.ZeroSizeContainers(ctx) + release, err := s.opsLimiter.ReadRequest(ctx) if err != nil { - s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } + ids, err := s.metaBase.ZeroSizeContainers(ctx) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return } if len(ids) == 0 { @@ -776,9 +791,15 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui } func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) { - ids, err := s.metaBase.ZeroCountContainers(ctx) + release, err := s.opsLimiter.ReadRequest(ctx) if err != nil { - s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } + ids, err := s.metaBase.ZeroCountContainers(ctx) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return } if len(ids) == 0 { diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 3993593ad..54d2f1510 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -37,7 +37,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - 
blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), @@ -61,8 +62,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { meta.WithEpochState(epochState{}), ), WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))), - WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(addresses) + WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { + sh.HandleDeletedLocks(ctx, addresses) }), WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) { sh.HandleExpiredLocks(ctx, epoch, a) @@ -73,13 +74,13 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { return pool }), WithGCRemoverSleepInterval(1 * time.Second), + WithDisabledGC(), } sh = New(opts...) - sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} } require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() obj := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index a438b5def..f512a488a 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -5,13 +5,13 @@ import ( "errors" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" @@ -34,7 +34,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { return util.NewPseudoWorkerPool() // synchronous event processing })}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() @@ -69,12 +69,12 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { require.NoError(t, err) epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) var getPrm GetPrm getPrm.SetAddress(objectCore.AddressOf(obj)) _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectNotFound(err), "expired object must be deleted") + require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired object must be deleted") } func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { @@ -131,7 +131,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { return 
util.NewPseudoWorkerPool() // synchronous event processing })}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() lock := testutil.GenerateObjectWithCID(cnr) lock.SetType(objectSDK.TypeLock) @@ -165,10 +165,10 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { require.True(t, errors.As(err, &splitInfoError), "split info must be provided") epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectNotFound(err), "expired complex object must be deleted on epoch after lock expires") + require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires") } func TestGCDropsObjectInhumedFromWritecache(t *testing.T) { @@ -190,7 +190,7 @@ func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool additionalShardOptions: []Option{WithDisabledGC()}, wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() obj := testutil.GenerateObjectWithSize(1024) @@ -254,7 +254,7 @@ func TestGCDontDeleteObjectFromWritecache(t *testing.T) { additionalShardOptions: []Option{WithDisabledGC()}, wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() obj := testutil.GenerateObjectWithSize(1024) diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 2e7c84bcd..28f8912be 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -10,7 +10,6 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -27,8 +26,9 @@ type storFetcher = func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, // GetPrm groups the parameters of Get operation. type GetPrm struct { - addr oid.Address - skipMeta bool + addr oid.Address + skipMeta bool + skipEvacCheck bool } // GetRes groups the resulting values of Get operation. @@ -50,6 +50,11 @@ func (p *GetPrm) SetIgnoreMeta(ignore bool) { p.skipMeta = ignore } +// SkipEvacCheck is a Get option that skips the check whether evacuation is in progress. +func (p *GetPrm) SkipEvacCheck(val bool) { + p.skipEvacCheck = val +} + // Object returns the requested object.
func (r GetRes) Object() *objectSDK.Object { return r.obj @@ -85,6 +90,10 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, ErrShardDisabled } + if s.info.EvacuationInProgress && !prm.skipEvacCheck { + return GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) { var getPrm common.GetPrm getPrm.Address = prm.addr @@ -102,6 +111,12 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { return c.Get(ctx, prm.addr) } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return GetRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) @@ -134,7 +149,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound)) } } else { - s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr)) + s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr)) } if s.hasWriteCache() { @@ -143,16 +158,14 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta return res, false, err } if client.IsErrObjectNotFound(err) { - s.log.Debug(logs.ShardObjectIsMissingInWritecache, + s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache, zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Bool("skip_meta", skipMeta)) } else { - s.log.Error(logs.ShardFailedToFetchObjectFromWritecache, + s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache, zap.Error(err), zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Bool("skip_meta", skipMeta)) } } if skipMeta || mErr != nil { @@ -165,7 +178,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta mExRes, err := s.metaBase.StorageID(ctx, mPrm) if err != nil { - return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err) + return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err) } storageID := mExRes.StorageID() diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go index 8a7c6972d..837991b73 100644 --- a/pkg/local_object_storage/shard/get_test.go +++ b/pkg/local_object_storage/shard/get_test.go @@ -5,11 +5,9 @@ import ( "context" "errors" "testing" - "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -32,7 +30,7 @@ func TestShard_Get(t *testing.T) { func testShardGet(t *testing.T, hasWriteCache bool) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() var putPrm PutPrm var getPrm GetPrm @@ -49,7 +47,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) { getPrm.SetAddress(object.AddressOf(obj)) - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := sh.Get(context.Background(), getPrm) require.NoError(t, err) require.Equal(t, obj, 
res.Object()) }) @@ -67,7 +65,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) { getPrm.SetAddress(object.AddressOf(obj)) - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := sh.Get(context.Background(), getPrm) require.NoError(t, err) require.Equal(t, obj, res.Object()) }) @@ -95,13 +93,13 @@ func testShardGet(t *testing.T, hasWriteCache bool) { getPrm.SetAddress(object.AddressOf(child)) - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := sh.Get(context.Background(), getPrm) require.NoError(t, err) require.True(t, binaryEqual(child, res.Object())) getPrm.SetAddress(object.AddressOf(parent)) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = sh.Get(context.Background(), getPrm) var si *objectSDK.SplitInfoError require.True(t, errors.As(err, &si)) @@ -115,19 +113,6 @@ func testShardGet(t *testing.T, hasWriteCache bool) { }) } -func testGet(t *testing.T, sh *Shard, getPrm GetPrm, hasWriteCache bool) (GetRes, error) { - res, err := sh.Get(context.Background(), getPrm) - if hasWriteCache { - require.Eventually(t, func() bool { - if client.IsErrObjectNotFound(err) { - res, err = sh.Get(context.Background(), getPrm) - } - return !client.IsErrObjectNotFound(err) - }, time.Second, time.Millisecond*100) - } - return res, err -} - // binary equal is used when object contains empty lists in the structure and // requre.Equal fails on comparing and []{} lists. func binaryEqual(a, b *objectSDK.Object) bool { diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go index 9d5d31260..34b8290d6 100644 --- a/pkg/local_object_storage/shard/head.go +++ b/pkg/local_object_storage/shard/head.go @@ -4,7 +4,9 @@ import ( "context" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" @@ -70,10 +72,21 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) { res, err = s.Get(ctx, getPrm) obj = res.Object() } else { + s.m.RLock() + defer s.m.RUnlock() + if s.info.EvacuationInProgress { + return HeadRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } var headParams meta.GetPrm headParams.SetAddress(prm.addr) headParams.SetRaw(prm.raw) + release, limitErr := s.opsLimiter.ReadRequest(ctx) + if limitErr != nil { + return HeadRes{}, limitErr + } + defer release() + var res meta.GetRes res, err = s.metaBase.Get(ctx, headParams) obj = res.Header() diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go index 1f4631993..deb3019df 100644 --- a/pkg/local_object_storage/shard/head_test.go +++ b/pkg/local_object_storage/shard/head_test.go @@ -4,11 +4,9 @@ import ( "context" "errors" "testing" - "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/stretchr/testify/require" @@ -30,7 +28,7 @@ func TestShard_Head(t *testing.T) { func testShardHead(t *testing.T, hasWriteCache 
bool) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() var putPrm PutPrm var headPrm HeadPrm @@ -46,7 +44,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) { headPrm.SetAddress(object.AddressOf(obj)) - res, err := testHead(t, sh, headPrm, hasWriteCache) + res, err := sh.Head(context.Background(), headPrm) require.NoError(t, err) require.Equal(t, obj.CutPayload(), res.Object()) }) @@ -74,7 +72,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) { var siErr *objectSDK.SplitInfoError - _, err = testHead(t, sh, headPrm, hasWriteCache) + _, err = sh.Head(context.Background(), headPrm) require.True(t, errors.As(err, &siErr)) headPrm.SetAddress(object.AddressOf(parent)) @@ -85,16 +83,3 @@ func testShardHead(t *testing.T, hasWriteCache bool) { require.Equal(t, parent.CutPayload(), head.Object()) }) } - -func testHead(t *testing.T, sh *Shard, headPrm HeadPrm, hasWriteCache bool) (HeadRes, error) { - res, err := sh.Head(context.Background(), headPrm) - if hasWriteCache { - require.Eventually(t, func() bool { - if client.IsErrObjectNotFound(err) { - res, err = sh.Head(context.Background(), headPrm) - } - return !client.IsErrObjectNotFound(err) - }, time.Second, time.Millisecond*100) - } - return res, err -} diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 2fe68d270..7391adef2 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -1,11 +1,11 @@ package shard import ( + "context" "errors" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/mr-tron/base58" "go.uber.org/zap" ) @@ -31,12 +31,12 @@ func (s *Shard) ID() *ID { } // UpdateID reads shard ID saved in the metabase and updates it if it is missing. 
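Review note on the hunks above: `Get` and `Head` now fail fast with `ObjectNotFound` while `info.EvacuationInProgress` is set (`GetRange` gains the same check further below), so readers fall back to replicas on other shards instead of racing the evacuation. A minimal caller-side sketch of that behavior, assuming an initialized `sh *shard.Shard`; the helper itself is illustrative and not part of this change:

```go
import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// readDuringEvacuation is a hypothetical caller demonstrating the new gate.
func readDuringEvacuation(ctx context.Context, sh *shard.Shard, addr oid.Address) error {
	sh.SetEvacuationInProgress(true) // setter added in shard.go below

	var prm shard.GetPrm
	prm.SetAddress(addr)

	_, err := sh.Get(ctx, prm)
	if client.IsErrObjectNotFound(err) {
		// Expected while evacuating: the object may still be on this
		// shard, but it is hidden so the engine tries other shards.
		return nil
	}
	return err
}
```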
-func (s *Shard) UpdateID() (err error) { +func (s *Shard) UpdateID(ctx context.Context) (err error) { var idFromMetabase []byte modeDegraded := s.GetMode().NoMetabase() if !modeDegraded { - if idFromMetabase, err = s.metaBase.GetShardID(mode.ReadOnly); err != nil { - err = fmt.Errorf("failed to read shard id from metabase: %w", err) + if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil { + err = fmt.Errorf("read shard id from metabase: %w", err) } } @@ -45,14 +45,12 @@ func (s *Shard) UpdateID() (err error) { } shardID := s.info.ID.String() - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.SetShardID(shardID) - } + s.metricsWriter.SetShardID(shardID) if s.writeCache != nil && s.writeCache.GetMetrics() != nil { s.writeCache.GetMetrics().SetShardID(shardID) } - s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))} + s.log = s.log.With(zap.Stringer("shard_id", s.info.ID)) s.metaBase.SetLogger(s.log) s.blobStor.SetLogger(s.log) if s.hasWriteCache() { @@ -63,10 +61,11 @@ func (s *Shard) UpdateID() (err error) { if s.pilorama != nil { s.pilorama.SetParentID(s.info.ID.String()) } + s.opsLimiter.SetParentID(s.info.ID.String()) if len(idFromMetabase) == 0 && !modeDegraded { - if setErr := s.metaBase.SetShardID(*s.info.ID, s.GetMode()); setErr != nil { - err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr)) + if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { + err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr)) } } return diff --git a/pkg/local_object_storage/shard/info.go b/pkg/local_object_storage/shard/info.go index 1051ab3db..f01796ec7 100644 --- a/pkg/local_object_storage/shard/info.go +++ b/pkg/local_object_storage/shard/info.go @@ -16,6 +16,9 @@ type Info struct { // Shard mode. Mode mode.Mode + // True when evacuation is in progress. + EvacuationInProgress bool + // Information about the metabase. 
MetaBaseInfo meta.Info diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 746177c3a..c0fd65f4b 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" @@ -82,6 +81,12 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return InhumeRes{}, err + } + defer release() + if s.hasWriteCache() { for i := range prm.target { _ = s.writeCache.Delete(ctx, prm.target[i]) @@ -109,9 +114,8 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrLockObjectRemoval } - s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase, - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), + s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase, + zap.Error(err), ) s.m.RUnlock() diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go index 82754568f..1421f0e18 100644 --- a/pkg/local_object_storage/shard/inhume_test.go +++ b/pkg/local_object_storage/shard/inhume_test.go @@ -27,7 +27,7 @@ func TestShard_Inhume(t *testing.T) { func testShardInhume(t *testing.T, hasWriteCache bool) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() @@ -48,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) { _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = sh.Get(context.Background(), getPrm) require.NoError(t, err) _, err = sh.Inhume(context.Background(), inhPrm) diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 08ea81a0c..af87981ca 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -34,6 +33,30 @@ func (r ListContainersRes) Containers() []cid.ID { return r.containers } +// IterateOverContainersPrm contains parameters for IterateOverContainers operation. +type IterateOverContainersPrm struct { + // Handler function executed upon containers in db. + Handler func(context.Context, objectSDK.Type, cid.ID) error +} + +// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. +type IterateOverObjectsInContainerPrm struct { + // ObjectType type of objects to iterate over. 
+ ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. + ContainerID cid.ID + // Handler function executed upon objects in db. + Handler func(context.Context, *objectcore.Info) error +} + +// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation. +type CountAliveObjectsInContainerPrm struct { + // ObjectType type of objects to iterate over. + ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. + ContainerID cid.ID +} + // ListWithCursorPrm contains parameters for ListWithCursor operation. type ListWithCursorPrm struct { count uint32 @@ -83,9 +106,15 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, err + } + defer release() + lst, err := s.metaBase.Containers(ctx) if err != nil { - return res, fmt.Errorf("can't list stored containers: %w", err) + return res, fmt.Errorf("list stored containers: %w", err) } filters := objectSDK.NewSearchFilters() @@ -98,10 +127,9 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase if err != nil { - s.log.Debug(logs.ShardCantSelectAllObjects, + s.log.Debug(ctx, logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } @@ -123,9 +151,15 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo return ListContainersRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListContainersRes{}, err + } + defer release() + containers, err := s.metaBase.Containers(ctx) if err != nil { - return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err) + return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err) } return ListContainersRes{ @@ -151,12 +185,18 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List return ListWithCursorRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListWithCursorRes{}, err + } + defer release() + var metaPrm meta.ListPrm metaPrm.SetCount(prm.count) metaPrm.SetCursor(prm.cursor) res, err := s.metaBase.ListWithCursor(ctx, metaPrm) if err != nil { - return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err) + return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err) } return ListWithCursorRes{ @@ -164,3 +204,96 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List cursor: res.Cursor(), }, nil } + +// IterateOverContainers lists physical containers present in the shard.
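Usage sketch for the new iteration API before its implementation below: the `...Prm` types above pair a handler with a container scope. The caller and handler body here are illustrative only; all named types and methods come from this change:

```go
import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// countAlivePerContainer walks every physical container on the shard and
// then counts each container's alive objects.
func countAlivePerContainer(ctx context.Context, sh *shard.Shard) (map[cid.ID]uint64, error) {
	type cnt struct {
		id  cid.ID
		typ objectSDK.Type
	}
	var found []cnt
	err := sh.IterateOverContainers(ctx, shard.IterateOverContainersPrm{
		// Collect first; counting inside the handler would re-enter the
		// shard's RLock and take a second limiter slot while one is held.
		Handler: func(_ context.Context, typ objectSDK.Type, id cid.ID) error {
			found = append(found, cnt{id: id, typ: typ})
			return nil
		},
	})
	if err != nil {
		return nil, err
	}
	counts := make(map[cid.ID]uint64, len(found))
	for _, c := range found {
		n, err := sh.CountAliveObjectsInContainer(ctx, shard.CountAliveObjectsInContainerPrm{
			ObjectType:  c.typ,
			ContainerID: c.id,
		})
		if err != nil {
			return nil, err
		}
		counts[c.id] = n
	}
	return counts, nil
}
```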
+func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error { + _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return ErrDegradedMode + } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + + var metaPrm meta.IterateOverContainersPrm + metaPrm.Handler = prm.Handler + err = s.metaBase.IterateOverContainers(ctx, metaPrm) + if err != nil { + return fmt.Errorf("iterate over containers: %w", err) + } + + return nil +} + +// IterateOverObjectsInContainer lists physical objects present in the shard for the provided container's bucket name. +func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error { + _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return ErrDegradedMode + } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + + var metaPrm meta.IterateOverObjectsInContainerPrm + metaPrm.ContainerID = prm.ContainerID + metaPrm.ObjectType = prm.ObjectType + metaPrm.Handler = prm.Handler + err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) + if err != nil { + return fmt.Errorf("iterate over objects: %w", err) + } + + return nil +} + +// CountAliveObjectsInContainer counts objects in the container's bucket which aren't in graveyard or garbage. +func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) { + _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInContainer") + defer span.End() + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return 0, ErrDegradedMode + } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + + var metaPrm meta.CountAliveObjectsInContainerPrm + metaPrm.ObjectType = prm.ObjectType + metaPrm.ContainerID = prm.ContainerID + count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm) + if err != nil { + return 0, fmt.Errorf("count alive objects in bucket: %w", err) + } + + return count, nil +} diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go index 8a49a36fd..139b2e316 100644 --- a/pkg/local_object_storage/shard/list_test.go +++ b/pkg/local_object_storage/shard/list_test.go @@ -18,14 +18,14 @@ func TestShard_List(t *testing.T) { t.Run("without write cache", func(t *testing.T) { t.Parallel() sh := newShard(t, false) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() testShardList(t, sh) }) t.Run("with write cache", func(t *testing.T) { t.Parallel() shWC := newShard(t, true) - defer func() { require.NoError(t, shWC.Close()) }() + defer func() { require.NoError(t, shWC.Close(context.Background())) }() testShardList(t, shWC) }) } @@ -39,11 +39,11 @@ func testShardList(t *testing.T, sh *Shard) { var errG errgroup.Group errG.SetLimit(C * N) - for i := 0; i < C; i++ { + for range C { errG.Go(func() error { cnr := cidtest.ID() - for j := 0; j < N; j++ { + for range N { errG.Go(func() error { obj :=
testutil.GenerateObjectWithCID(cnr) testutil.AddPayload(obj, 1<<2) diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go index 4a8d89d63..9c392fdac 100644 --- a/pkg/local_object_storage/shard/lock.go +++ b/pkg/local_object_storage/shard/lock.go @@ -38,7 +38,13 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked [] return ErrDegradedMode } - err := s.metaBase.Lock(ctx, idCnr, locker, locked) + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + + err = s.metaBase.Lock(ctx, idCnr, locker, locked) if err != nil { return fmt.Errorf("metabase lock: %w", err) } @@ -61,6 +67,12 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() + var prm meta.IsLockedPrm prm.SetAddress(addr) @@ -72,10 +84,10 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return res.Locked(), nil } -// GetLocked return lock id's of the provided object. Not found object is +// GetLocks return lock id's of the provided object. Not found object is // considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise. -func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocked", +func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), attribute.String("address", addr.EncodeToString()), @@ -86,5 +98,12 @@ func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, erro if m.NoMetabase() { return nil, ErrDegradedMode } - return s.metaBase.GetLocked(ctx, addr) + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + + return s.metaBase.GetLocks(ctx, addr) } diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 9ce95feb1..3878a65cd 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -28,9 +28,10 @@ func TestShard_Lock(t *testing.T) { var sh *Shard rootPath := t.TempDir() + l := logger.NewLoggerWrapper(zap.NewNop()) opts := []Option{ WithID(NewIDFromBytes([]byte{})), - WithLogger(&logger.Logger{Logger: zap.NewNop()}), + WithLogger(l), WithBlobStorOptions( blobstor.WithStorages([]blobstor.SubStorage{ { @@ -53,8 +54,8 @@ func TestShard_Lock(t *testing.T) { meta.WithPath(filepath.Join(rootPath, "meta")), meta.WithEpochState(epochState{}), ), - WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(addresses) + WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { + sh.HandleDeletedLocks(ctx, addresses) }), } @@ -62,7 +63,7 @@ func TestShard_Lock(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() obj := testutil.GenerateObjectWithCID(cnr) @@ -148,7 +149,7 @@ func TestShard_Lock(t *testing.T) { func TestShard_IsLocked(t *testing.T) { sh := newShard(t, false) - defer func() { require.NoError(t, sh.Close()) 
}() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() obj := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go new file mode 100644 index 000000000..087ba42ef --- /dev/null +++ b/pkg/local_object_storage/shard/metrics.go @@ -0,0 +1,60 @@ +package shard + +import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + +// MetricsWriter is an interface that must store shard's metrics. +type MetricsWriter interface { + // SetObjectCounter must set object counter taking into account object type. + SetObjectCounter(objectType string, v uint64) + // AddToObjectCounter must update object counter taking into account object + // type. + // Negative parameter must decrease the counter. + AddToObjectCounter(objectType string, delta int) + // AddToContainerSize must add a value to the container size. + // Value can be negative. + AddToContainerSize(cnr string, value int64) + // AddToPayloadSize must add a value to the payload size. + // Value can be negative. + AddToPayloadSize(value int64) + // IncObjectCounter must increment shard's object counter taking into account + // object type. + IncObjectCounter(objectType string) + // SetShardID must set (update) the shard identifier that will be used in + // metrics. + SetShardID(id string) + // SetMode set mode of shard. + SetMode(mode mode.Mode) + // SetContainerObjectsCount sets container object count. + SetContainerObjectsCount(cnrID string, objectType string, value uint64) + // IncContainerObjectsCount increments container object count. + IncContainerObjectsCount(cnrID string, objectType string) + // SubContainerObjectsCount subtracts container object count. + SubContainerObjectsCount(cnrID string, objectType string, value uint64) + // IncRefillObjectsCount increments refill objects count. + IncRefillObjectsCount(path string, size int, success bool) + // SetRefillPercent sets refill percent. + SetRefillPercent(path string, percent uint32) + // SetRefillStatus sets refill status. 
+ SetRefillStatus(path string, status string) + // SetEvacuationInProgress sets evacuation status + SetEvacuationInProgress(value bool) +} + +type noopMetrics struct{} + +var _ MetricsWriter = noopMetrics{} + +func (noopMetrics) SetObjectCounter(string, uint64) {} +func (noopMetrics) AddToObjectCounter(string, int) {} +func (noopMetrics) AddToContainerSize(string, int64) {} +func (noopMetrics) AddToPayloadSize(int64) {} +func (noopMetrics) IncObjectCounter(string) {} +func (noopMetrics) SetShardID(string) {} +func (noopMetrics) SetMode(mode.Mode) {} +func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {} +func (noopMetrics) IncContainerObjectsCount(string, string) {} +func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {} +func (noopMetrics) IncRefillObjectsCount(string, int, bool) {} +func (noopMetrics) SetRefillPercent(string, uint32) {} +func (noopMetrics) SetRefillStatus(string, string) {} +func (noopMetrics) SetEvacuationInProgress(bool) {} diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index 38d465f31..5230dcad0 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -17,6 +17,7 @@ import ( cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" ) @@ -34,7 +35,7 @@ type metricsStore struct { refillStatus string } -func NewMetricStore() *metricsStore { +func newMetricStore() *metricsStore { return &metricsStore{ objCounters: map[string]uint64{ "phy": 0, @@ -192,21 +193,24 @@ func (m *metricsStore) SetRefillStatus(_ string, status string) { m.refillStatus = status } +func (m *metricsStore) SetEvacuationInProgress(bool) { +} + func TestCounters(t *testing.T) { t.Parallel() dir := t.TempDir() sh, mm := shardWithMetrics(t, dir) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() - sh.SetMode(mode.ReadOnly) + sh.SetMode(context.Background(), mode.ReadOnly) require.Equal(t, mode.ReadOnly, mm.mode) - sh.SetMode(mode.ReadWrite) + sh.SetMode(context.Background(), mode.ReadWrite) require.Equal(t, mode.ReadWrite, mm.mode) const objNumber = 10 oo := make([]*objectSDK.Object, objNumber) - for i := 0; i < objNumber; i++ { + for i := range objNumber { oo[i] = testutil.GenerateObject() } @@ -248,7 +252,7 @@ func TestCounters(t *testing.T) { var prm PutPrm - for i := 0; i < objNumber; i++ { + for i := range objNumber { prm.SetObject(oo[i]) _, err := sh.Put(context.Background(), prm) @@ -269,7 +273,7 @@ func TestCounters(t *testing.T) { var prm InhumePrm inhumedNumber := objNumber / 4 - for i := 0; i < inhumedNumber; i++ { + for i := range inhumedNumber { prm.MarkAsGarbage(objectcore.AddressOf(oo[i])) _, err := sh.Inhume(context.Background(), prm) @@ -305,19 +309,21 @@ func TestCounters(t *testing.T) { t.Run("inhume_TS", func(t *testing.T) { var prm InhumePrm - ts := objectcore.AddressOf(testutil.GenerateObject()) phy := mm.getObjectCounter(physical) logic := mm.getObjectCounter(logical) custom := mm.getObjectCounter(user) inhumedNumber := int(phy / 4) - prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...) 
+ for _, o := range addrFromObjs(oo[:inhumedNumber]) { + ts := oidtest.Address() + ts.SetContainer(o.Container()) + prm.SetTarget(ts, o) + _, err := sh.Inhume(context.Background(), prm) + require.NoError(t, err) + } - _, err := sh.Inhume(context.Background(), prm) - require.NoError(t, err) - - for i := 0; i < inhumedNumber; i++ { + for i := range inhumedNumber { cid, ok := oo[i].ContainerID() require.True(t, ok) expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize()) @@ -398,7 +404,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) { }), } - mm := NewMetricStore() + mm := newMetricStore() sh := New( WithID(NewIDFromBytes([]byte{})), @@ -419,7 +425,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) { func addrFromObjs(oo []*objectSDK.Object) []oid.Address { aa := make([]oid.Address, len(oo)) - for i := 0; i < len(oo); i++ { + for i := range len(oo) { aa[i] = objectcore.AddressOf(oo[i]) } diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go index 1bab57448..901528976 100644 --- a/pkg/local_object_storage/shard/mode.go +++ b/pkg/local_object_storage/shard/mode.go @@ -1,6 +1,8 @@ package shard import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -18,19 +20,21 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode") // // Returns any error encountered that did not allow // setting shard mode. -func (s *Shard) SetMode(m mode.Mode) error { +func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error { unlock := s.lockExclusive() defer unlock() - return s.setMode(m) + return s.setMode(ctx, m) } -func (s *Shard) setMode(m mode.Mode) error { - s.log.Info(logs.ShardSettingShardMode, +func (s *Shard) setMode(ctx context.Context, m mode.Mode) error { + s.log.Info(ctx, logs.ShardSettingShardMode, zap.Stringer("old_mode", s.info.Mode), zap.Stringer("new_mode", m)) - components := []interface{ SetMode(mode.Mode) error }{ + components := []interface { + SetMode(context.Context, mode.Mode) error + }{ s.metaBase, s.blobStor, } @@ -58,18 +62,16 @@ func (s *Shard) setMode(m mode.Mode) error { if !m.Disabled() { for i := range components { - if err := components[i].SetMode(m); err != nil { + if err := components[i].SetMode(ctx, m); err != nil { return err } } } s.info.Mode = m - if s.metricsWriter != nil { - s.metricsWriter.SetMode(s.info.Mode) - } + s.metricsWriter.SetMode(s.info.Mode) - s.log.Info(logs.ShardShardModeSetSuccessfully, + s.log.Info(ctx, logs.ShardShardModeSetSuccessfully, zap.Stringer("mode", s.info.Mode)) return nil } diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index d7a9e7012..f8cb00a31 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -17,7 +17,8 @@ import ( // PutPrm groups the parameters of Put operation. type PutPrm struct { - obj *objectSDK.Object + obj *objectSDK.Object + indexAttributes bool } // PutRes groups the resulting values of Put operation. @@ -28,6 +29,10 @@ func (p *PutPrm) SetObject(obj *objectSDK.Object) { p.obj = obj } +func (p *PutPrm) SetIndexAttributes(v bool) { + p.indexAttributes = v +} + // Put saves the object in shard. 
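The `SetIndexAttributes` setter above is the write-side switch for attribute indexing; its read-side counterpart is `SelectPrm.SetContainerID(cnr, isIndexedContainer)` in select.go further below. A minimal sketch of an indexed put, assuming an initialized shard; the helper is hypothetical:

```go
import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// putIndexed is a hypothetical caller of the new Put option.
func putIndexed(ctx context.Context, sh *shard.Shard, obj *objectSDK.Object) error {
	var prm shard.PutPrm
	prm.SetObject(obj)
	// Ask the metabase to index the object's attributes so that Select
	// on an indexed container can use them.
	prm.SetIndexAttributes(true)

	_, err := sh.Put(ctx, prm)
	return err
}
```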
// // Returns any error encountered that @@ -62,6 +67,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { var res common.PutRes + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return PutRes{}, err + } + defer release() + // exist check are not performed there, these checks should be executed // ahead of `Put` by storage engine tryCache := s.hasWriteCache() && !m.NoMetabase() @@ -70,13 +81,13 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { } if err != nil || !tryCache { if err != nil { - s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, - zap.String("err", err.Error())) + s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, + zap.Error(err)) } res, err = s.blobStor.Put(ctx, putPrm) if err != nil { - return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err) + return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err) } } @@ -84,11 +95,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { var pPrm meta.PutPrm pPrm.SetObject(prm.obj) pPrm.SetStorageID(res.StorageID) + pPrm.SetIndexAttributes(prm.indexAttributes) res, err := s.metaBase.Put(ctx, pPrm) if err != nil { // may we need to handle this case in a special way // since the object has been successfully written to BlobStor - return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err) + return PutRes{}, fmt.Errorf("put object to metabase: %w", err) } if res.Inserted { diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go index 9491543c4..443689104 100644 --- a/pkg/local_object_storage/shard/range.go +++ b/pkg/local_object_storage/shard/range.go @@ -87,6 +87,10 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) { s.m.RLock() defer s.m.RUnlock() + if s.info.EvacuationInProgress { + return RngRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + if s.info.Mode.Disabled() { return RngRes{}, ErrShardDisabled } @@ -127,6 +131,12 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) { return obj, nil } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return RngRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index cc73db316..06fe9f511 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ b/pkg/local_object_storage/shard/range_test.go @@ -79,7 +79,8 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), @@ -94,7 +95,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { }), }, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go new file mode 100644 index 000000000..20f1f2b6f --- /dev/null 
+++ b/pkg/local_object_storage/shard/rebuild.go @@ -0,0 +1,193 @@ +package shard + +import ( + "context" + "errors" + "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" +) + +var ErrRebuildInProgress = errors.New("shard rebuild in progress") + +type rebuildTask struct { + concurrencyLimiter common.RebuildLimiter + fillPercent int +} + +type rebuilder struct { + mtx *sync.Mutex + wg *sync.WaitGroup + cancel func() + done chan struct{} + tasks chan rebuildTask +} + +func newRebuilder() *rebuilder { + return &rebuilder{ + mtx: &sync.Mutex{}, + wg: &sync.WaitGroup{}, + tasks: make(chan rebuildTask), + } +} + +func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) { + r.mtx.Lock() + defer r.mtx.Unlock() + + if r.done != nil { + return // already started + } + ctx, cancel := context.WithCancel(ctx) + r.cancel = cancel + r.done = make(chan struct{}) + r.wg.Add(1) + go func() { + defer r.wg.Done() + for { + select { + case <-r.done: + return + case t, ok := <-r.tasks: + if !ok { + continue + } + runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter) + } + } + }() +} + +func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger, + fillPercent int, concLimiter common.RebuildLimiter, +) { + select { + case <-ctx.Done(): + return + default: + } + log.Info(ctx, logs.BlobstoreRebuildStarted) + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) + if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil { + log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) + } else { + log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully) + } +} + +func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int, +) error { + select { + case <-ctx.Done(): + return ctx.Err() + case r.tasks <- rebuildTask{ + concurrencyLimiter: limiter, + fillPercent: fillPercent, + }: + return nil + default: + return ErrRebuildInProgress + } +} + +func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) { + r.mtx.Lock() + defer r.mtx.Unlock() + + if r.done != nil { + close(r.done) + } + if r.cancel != nil { + r.cancel() + } + r.wg.Wait() + r.cancel = nil + r.done = nil + log.Info(ctx, logs.BlobstoreRebuildStopped) +} + +var errMBIsNotAvailable = errors.New("metabase is not available") + +type mbStorageIDUpdate struct { + mb *meta.DB +} + +func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if u.mb == nil { + return errMBIsNotAvailable + } + + var prm meta.UpdateStorageIDPrm + prm.SetAddress(addr) + prm.SetStorageID(storageID) + _, err := u.mb.UpdateStorageID(ctx, prm) + return err +} + +type RebuildPrm struct { + ConcurrencyLimiter 
common.ConcurrencyLimiter + TargetFillPercent uint32 +} + +func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ScheduleRebuild", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.Int64("target_fill_percent", int64(p.TargetFillPercent)), + )) + defer span.End() + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.ReadOnly() { + return ErrReadOnlyMode + } + if s.info.Mode.NoMetabase() { + return ErrDegradedMode + } + + limiter := &rebuildLimiter{ + concurrencyLimiter: p.ConcurrencyLimiter, + rateLimiter: s.opsLimiter, + } + return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent)) +} + +var _ common.RebuildLimiter = (*rebuildLimiter)(nil) + +type rebuildLimiter struct { + concurrencyLimiter common.ConcurrencyLimiter + rateLimiter qos.Limiter +} + +func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + return r.concurrencyLimiter.AcquireWorkSlot(ctx) +} + +func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.ReadRequest(ctx) + return common.ReleaseFunc(release), err +} + +func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.WriteRequest(ctx) + return common.ReleaseFunc(release), err +} diff --git a/pkg/local_object_storage/shard/rebuild_limiter.go b/pkg/local_object_storage/shard/rebuild_limiter.go deleted file mode 100644 index efc21837c..000000000 --- a/pkg/local_object_storage/shard/rebuild_limiter.go +++ /dev/null @@ -1,13 +0,0 @@ -package shard - -import "context" - -type RebuildWorkerLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() -} - -type noopRebuildLimiter struct{} - -func (l *noopRebuildLimiter) AcquireWorkSlot(context.Context) error { return nil } -func (l *noopRebuildLimiter) ReleaseWorkSlot() {} diff --git a/pkg/local_object_storage/shard/rebuilder.go b/pkg/local_object_storage/shard/rebuilder.go deleted file mode 100644 index f18573c57..000000000 --- a/pkg/local_object_storage/shard/rebuilder.go +++ /dev/null @@ -1,98 +0,0 @@ -package shard - -import ( - "context" - "errors" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -type rebuilder struct { - mtx *sync.Mutex - wg *sync.WaitGroup - cancel func() - limiter RebuildWorkerLimiter -} - -func newRebuilder(l RebuildWorkerLimiter) *rebuilder { - return &rebuilder{ - mtx: &sync.Mutex{}, - wg: &sync.WaitGroup{}, - cancel: nil, - limiter: l, - } -} - -func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) { - r.mtx.Lock() - defer r.mtx.Unlock() - - r.start(ctx, bs, mb, log) -} - -func (r *rebuilder) start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) { - if r.cancel != nil { - r.stop(log) - } - ctx, cancel := context.WithCancel(ctx) - r.cancel = cancel - r.wg.Add(1) - go func() { - defer r.wg.Done() - - log.Info(logs.BlobstoreRebuildStarted) - if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, r.limiter); err != nil { - log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err)) - } else { 
- log.Info(logs.BlobstoreRebuildCompletedSuccessfully) - } - }() -} - -func (r *rebuilder) Stop(log *logger.Logger) { - r.mtx.Lock() - defer r.mtx.Unlock() - - r.stop(log) -} - -func (r *rebuilder) stop(log *logger.Logger) { - if r.cancel == nil { - return - } - - r.cancel() - r.wg.Wait() - r.cancel = nil - log.Info(logs.BlobstoreRebuildStopped) -} - -var errMBIsNotAvailable = errors.New("metabase is not available") - -type mbStorageIDUpdate struct { - mb *meta.DB -} - -func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if u.mb == nil { - return errMBIsNotAvailable - } - - var prm meta.UpdateStorageIDPrm - prm.SetAddress(addr) - prm.SetStorageID(storageID) - _, err := u.mb.UpdateStorageID(ctx, prm) - return err -} diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go index 509ccaaa6..d90343265 100644 --- a/pkg/local_object_storage/shard/refill_test.go +++ b/pkg/local_object_storage/shard/refill_test.go @@ -34,11 +34,11 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)}, }) - defer func() { require.NoError(b, sh.Close()) }() + defer func() { require.NoError(b, sh.Close(context.Background())) }() var putPrm PutPrm - for i := 0; i < objectsCount/2; i++ { + for range objectsCount / 2 { obj := testutil.GenerateObject() testutil.AddAttribute(obj, "foo", "bar") testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj @@ -49,7 +49,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { require.NoError(b, err) } - for i := 0; i < objectsCount/2; i++ { + for range objectsCount / 2 { obj := testutil.GenerateObject() testutil.AddAttribute(obj, "foo", "bar") obj.SetID(oidtest.ID()) @@ -61,7 +61,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { require.NoError(b, err) } - require.NoError(b, sh.Close()) + require.NoError(b, sh.Close(context.Background())) require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path)) require.NoError(b, sh.Open(context.Background())) @@ -72,5 +72,5 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { require.NoError(b, sh.Init(context.Background())) - require.NoError(b, sh.Close()) + require.NoError(b, sh.Close(context.Background())) } diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go index 7dacbfa6c..e563f390b 100644 --- a/pkg/local_object_storage/shard/reload_test.go +++ b/pkg/local_object_storage/shard/reload_test.go @@ -51,7 +51,7 @@ func TestShardReload(t *testing.T) { WithMetaBaseOptions(metaOpts...), WithPiloramaOptions( pilorama.WithPath(filepath.Join(p, "pilorama"))), - WithMetricsWriter(NewMetricStore()), + WithMetricsWriter(newMetricStore()), } sh := New(opts...) @@ -59,7 +59,7 @@ func TestShardReload(t *testing.T) { require.NoError(t, sh.Init(context.Background())) defer func() { - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) }() objects := make([]objAddr, 5) diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index 1615f5fbe..fbc751e26 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -15,8 +15,9 @@ import ( // SelectPrm groups the parameters of Select operation. 
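The new rebuild.go above replaces the old always-running rebuilder (deleted here) with an explicitly scheduled task: `ScheduleRebuild` hands a `rebuildTask` to a single worker over an unbuffered channel and fails fast with `ErrRebuildInProgress` when the worker is busy. A caller-side sketch; the fill-percent value is an example, not a default from this change:

```go
import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
)

// requestRebuild is a hypothetical caller of the new scheduling API.
func requestRebuild(ctx context.Context, sh *shard.Shard, cl common.ConcurrencyLimiter) error {
	err := sh.ScheduleRebuild(ctx, shard.RebuildPrm{
		ConcurrencyLimiter: cl,
		TargetFillPercent:  80, // example threshold
	})
	if errors.Is(err, shard.ErrRebuildInProgress) {
		// The send succeeds only while the worker goroutine is idle,
		// so concurrent requests are rejected instead of queued up.
		return nil
	}
	return err
}
```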
type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters + cnr cid.ID + filters objectSDK.SearchFilters + isIndexedContainer bool } // SelectRes groups the resulting values of Select operation. @@ -25,8 +26,9 @@ type SelectRes struct { } // SetContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) SetContainerID(cnr cid.ID) { +func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) { p.cnr = cnr + p.isIndexedContainer = isIndexedContainer } // SetFilters is a Select option to set the object filters. @@ -58,13 +60,20 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, err + } + defer release() + var selectPrm meta.SelectPrm selectPrm.SetFilters(prm.filters) selectPrm.SetContainerID(prm.cnr) + selectPrm.SetUseAttributeIndex(prm.isIndexedContainer) mRes, err := s.metaBase.Select(ctx, selectPrm) if err != nil { - return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err) + return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err) } return SelectRes{ diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 94f22feb5..d89b56266 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -7,6 +7,8 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -37,8 +39,9 @@ type Shard struct { rb *rebuilder - gcCancel atomic.Value - setModeRequested atomic.Bool + gcCancel atomic.Value + setModeRequested atomic.Bool + writecacheSealCancel atomic.Pointer[writecacheSealCanceler] } // Option represents Shard's constructor option. @@ -56,48 +59,6 @@ type DeletedLockCallback func(context.Context, []oid.Address) // EmptyContainersCallback is a callback handling list of zero-size and zero-count containers. type EmptyContainersCallback func(context.Context, []cid.ID) -// MetricsWriter is an interface that must store shard's metrics. -type MetricsWriter interface { - // SetObjectCounter must set object counter taking into account object type. - SetObjectCounter(objectType string, v uint64) - // AddToObjectCounter must update object counter taking into account object - // type. - // Negative parameter must decrease the counter. - AddToObjectCounter(objectType string, delta int) - // AddToContainerSize must add a value to the container size. - // Value can be negative. - AddToContainerSize(cnr string, value int64) - // AddToPayloadSize must add a value to the payload size. - // Value can be negative. - AddToPayloadSize(value int64) - // IncObjectCounter must increment shard's object counter taking into account - // object type. - IncObjectCounter(objectType string) - // SetShardID must set (update) the shard identifier that will be used in - // metrics. - SetShardID(id string) - // SetReadonly must set shard mode. - SetMode(mode mode.Mode) - // IncErrorCounter increment error counter. - IncErrorCounter() - // ClearErrorCounter clear error counter.
- ClearErrorCounter() - // DeleteShardMetrics deletes shard metrics from registry. - DeleteShardMetrics() - // SetContainerObjectsCount sets container object count. - SetContainerObjectsCount(cnrID string, objectType string, value uint64) - // IncContainerObjectsCount increments container object count. - IncContainerObjectsCount(cnrID string, objectType string) - // SubContainerObjectsCount subtracts container object count. - SubContainerObjectsCount(cnrID string, objectType string, value uint64) - // IncRefillObjectsCount increments refill objects count. - IncRefillObjectsCount(path string, size int, success bool) - // SetRefillPercent sets refill percent. - SetRefillPercent(path string, percent uint32) - // SetRefillStatus sets refill status. - SetRefillStatus(path string, status string) -} - type cfg struct { m sync.RWMutex @@ -135,20 +96,23 @@ type cfg struct { metricsWriter MetricsWriter - reportErrorFunc func(selfID string, message string, err error) + reportErrorFunc func(ctx context.Context, selfID string, message string, err error) - rebuildLimiter RebuildWorkerLimiter + containerInfo container.InfoProvider + + opsLimiter qos.Limiter } func defaultCfg() *cfg { return &cfg{ rmBatchSize: 100, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), gcCfg: defaultGCCfg(), - reportErrorFunc: func(string, string, error) {}, - rebuildLimiter: &noopRebuildLimiter{}, + reportErrorFunc: func(context.Context, string, string, error) {}, zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, + metricsWriter: noopMetrics{}, + opsLimiter: qos.NewNoopLimiter(), } } @@ -170,8 +134,8 @@ func New(opts ...Option) *Shard { tsSource: c.tsSource, } - reportFunc := func(msg string, err error) { - s.reportErrorFunc(s.ID().String(), msg, err) + reportFunc := func(ctx context.Context, msg string, err error) { + s.reportErrorFunc(ctx, s.ID().String(), msg, err) } s.blobStor.SetReportErrorFunc(reportFunc) @@ -190,6 +154,7 @@ func New(opts ...Option) *Shard { } s.fillInfo() + s.writecacheSealCancel.Store(notInitializedCancel) return s } @@ -240,7 +205,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option { func WithLogger(l *logger.Logger) Option { return func(c *cfg) { c.log = l - c.gcCfg.log = l + c.gcCfg.log = l.WithTag(logger.TagGC) } } @@ -253,7 +218,7 @@ func WithWriteCache(use bool) Option { // hasWriteCache returns bool if write cache exists on shards. func (s *Shard) hasWriteCache() bool { - return s.cfg.useWriteCache + return s.useWriteCache } // NeedRefillMetabase returns true if metabase is needed to be refilled. @@ -356,7 +321,7 @@ func WithGCMetrics(v GCMectrics) Option { // WithReportErrorFunc returns option to specify callback for handling storage-related errors // in the background workers. -func WithReportErrorFunc(f func(selfID string, message string, err error)) Option { +func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option { return func(c *cfg) { c.reportErrorFunc = f } @@ -378,14 +343,6 @@ func WithExpiredCollectorWorkerCount(count int) Option { } } -// WithRebuildWorkerLimiter return option to set concurrent -// workers count of storage rebuild operation. -func WithRebuildWorkerLimiter(l RebuildWorkerLimiter) Option { - return func(c *cfg) { - c.rebuildLimiter = l - } -} - // WithDisabledGC disables GC. // For testing purposes only. 
func WithDisabledGC() Option { @@ -408,16 +365,29 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option { } } -func (s *Shard) fillInfo() { - s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() - s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() - s.cfg.info.Mode = s.GetMode() +// WithContainerInfoProvider returns option to set container info provider. +func WithContainerInfoProvider(containerInfo container.InfoProvider) Option { + return func(c *cfg) { + c.containerInfo = containerInfo + } +} - if s.cfg.useWriteCache { - s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo() +func WithLimiter(l qos.Limiter) Option { + return func(c *cfg) { + c.opsLimiter = l + } +} + +func (s *Shard) fillInfo() { + s.info.MetaBaseInfo = s.metaBase.DumpInfo() + s.info.BlobStorInfo = s.blobStor.DumpInfo() + s.info.Mode = s.GetMode() + + if s.useWriteCache { + s.info.WriteCacheInfo = s.writeCache.DumpInfo() } if s.pilorama != nil { - s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo() + s.info.PiloramaInfo = s.pilorama.DumpInfo() } } @@ -435,13 +405,13 @@ const ( ) func (s *Shard) updateMetrics(ctx context.Context) { - if s.cfg.metricsWriter == nil || s.GetMode().NoMetabase() { + if s.GetMode().NoMetabase() { return } cc, err := s.metaBase.ObjectCounters() if err != nil { - s.log.Warn(logs.ShardMetaObjectCounterRead, + s.log.Warn(ctx, logs.ShardMetaObjectCounterRead, zap.Error(err), ) @@ -454,7 +424,7 @@ func (s *Shard) updateMetrics(ctx context.Context) { cnrList, err := s.metaBase.Containers(ctx) if err != nil { - s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err)) + s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err)) return } @@ -463,7 +433,7 @@ func (s *Shard) updateMetrics(ctx context.Context) { for i := range cnrList { size, err := s.metaBase.ContainerSize(cnrList[i]) if err != nil { - s.log.Warn(logs.ShardMetaCantReadContainerSize, + s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize, zap.String("cid", cnrList[i].EncodeToString()), zap.Error(err)) continue @@ -476,7 +446,7 @@ func (s *Shard) updateMetrics(ctx context.Context) { contCount, err := s.metaBase.ContainerCounters(ctx) if err != nil { - s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err)) + s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err)) return } for contID, count := range contCount.Counts { @@ -484,86 +454,69 @@ func (s *Shard) updateMetrics(ctx context.Context) { s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic) s.setContainerObjectsCount(contID.EncodeToString(), user, count.User) } - s.cfg.metricsWriter.SetMode(s.info.Mode) + s.metricsWriter.SetMode(s.info.Mode) } // incObjectCounter increment both physical and logical object // counters. 
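`WithLimiter` above plugs a `qos.Limiter` into the shard; together with the `noopMetrics{}` and `qos.NewNoopLimiter()` defaults now set in `defaultCfg`, it is what allows the `s.cfg.metricsWriter != nil` guards in this file to be dropped. A wiring sketch; note that `internal/qos` is importable only from within frostfs-node itself:

```go
import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
)

// newLimitedShard is illustrative: with no WithLimiter option the shard
// falls back to qos.NewNoopLimiter(), so every call site can acquire and
// release unconditionally.
func newLimitedShard(l qos.Limiter, opts ...shard.Option) *shard.Shard {
	return shard.New(append(opts, shard.WithLimiter(l))...)
}
```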
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.IncObjectCounter(physical) - s.cfg.metricsWriter.IncObjectCounter(logical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) - if isUser { - s.cfg.metricsWriter.IncObjectCounter(user) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) - } + s.metricsWriter.IncObjectCounter(physical) + s.metricsWriter.IncObjectCounter(logical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) + if isUser { + s.metricsWriter.IncObjectCounter(user) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) } } func (s *Shard) decObjectCounterBy(typ string, v uint64) { - if s.cfg.metricsWriter != nil && v > 0 { - s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v)) + if v > 0 { + s.metricsWriter.AddToObjectCounter(typ, -int(v)) } } func (s *Shard) setObjectCounterBy(typ string, v uint64) { - if s.cfg.metricsWriter != nil && v > 0 { - s.cfg.metricsWriter.SetObjectCounter(typ, v) + if v > 0 { + s.metricsWriter.SetObjectCounter(typ, v) } } func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) { - if s.cfg.metricsWriter == nil { - return - } - for cnrID, count := range byCnr { if count.Phy > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) } if count.Logic > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) } if count.User > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) } } } func (s *Shard) addToContainerSize(cnr string, size int64) { - if s.cfg.metricsWriter != nil && size != 0 { - s.cfg.metricsWriter.AddToContainerSize(cnr, size) + if size != 0 { + s.metricsWriter.AddToContainerSize(cnr, size) } } func (s *Shard) addToPayloadSize(size int64) { - if s.cfg.metricsWriter != nil && size != 0 { - s.cfg.metricsWriter.AddToPayloadSize(size) + if size != 0 { + s.metricsWriter.AddToPayloadSize(size) } } func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) { - if s.cfg.metricsWriter != nil && v > 0 { + if v > 0 { s.metricsWriter.SetContainerObjectsCount(cnr, typ, v) } } -func (s *Shard) IncErrorCounter() { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.IncErrorCounter() - } -} - -func (s *Shard) ClearErrorCounter() { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.ClearErrorCounter() - } -} - -func (s *Shard) DeleteShardMetrics() { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.DeleteShardMetrics() - } +func (s *Shard) SetEvacuationInProgress(val bool) { + s.m.Lock() + defer s.m.Unlock() + s.info.EvacuationInProgress = val + s.metricsWriter.SetEvacuationInProgress(val) } diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index 73ba2e82b..84be71c4d 100644 --- a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -60,7 +60,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o 
shardOptions) *Shard { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), @@ -89,8 +90,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))), WithWriteCache(enableWriteCache), WithWriteCacheOptions(o.wcOpts), - WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(addresses) + WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { + sh.HandleDeletedLocks(ctx, addresses) }), WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) { sh.HandleExpiredLocks(ctx, epoch, a) diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go index de00eabd1..b1232707f 100644 --- a/pkg/local_object_storage/shard/shutdown_test.go +++ b/pkg/local_object_storage/shard/shutdown_test.go @@ -52,10 +52,10 @@ func TestWriteCacheObjectLoss(t *testing.T) { }) } require.NoError(t, errG.Wait()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts}) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() var getPrm GetPrm diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index 26dc8ec1e..db361a8bd 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeMove(ctx, d, treeID, m) } @@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) } @@ -103,9 +113,46 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m * if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } +// TreeApplyBatch implements the pilorama.Forest interface. 
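`TreeApplyBatch` below, like every other tree operation in this file, wraps its pilorama call in the same acquire/check/defer-release sequence against the shard's ops limiter. A distilled sketch of that pattern; the interface here only approximates `qos.Limiter`, whose exact release type is not visible in this diff:

```go
import "context"

// readLimiter approximates the read half of qos.Limiter as used in these
// hunks; the real release type may be a named function type.
type readLimiter interface {
	ReadRequest(ctx context.Context) (release func(), err error)
}

// limitedRead runs op under a read slot: acquire, bail out on error,
// release once the guarded call returns.
func limitedRead(ctx context.Context, l readLimiter, op func() error) error {
	release, err := l.ReadRequest(ctx)
	if err != nil {
		return err
	}
	defer release()
	return op()
}
```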
+func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + if s.pilorama == nil { + return ErrPiloramaDisabled + } + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.ReadOnly() { + return ErrReadOnlyMode + } + if s.info.Mode.NoMetabase() { + return ErrDegradedMode + } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) +} + // TreeGetByPath implements the pilorama.Forest interface. func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath", @@ -130,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) } @@ -155,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n if s.info.Mode.NoMetabase() { return pilorama.Meta{}, 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Meta{}, 0, err + } + defer release() return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) } @@ -180,11 +237,16 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID) } // TreeSortedByFilename implements the pilorama.Forest interface. 
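The next hunk changes `TreeSortedByFilename` to page with `*pilorama.Cursor` instead of `*string`. An illustrative pager; treating a nil next cursor or an empty page as the end of the listing is an assumption, not something this diff spells out:

```go
import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// listSorted pages through a tree in filename order; the page size of 100
// is arbitrary.
func listSorted(ctx context.Context, sh *shard.Shard, cnr cidSDK.ID, treeID string, node pilorama.MultiNode) ([]pilorama.MultiNodeInfo, error) {
	var (
		result []pilorama.MultiNodeInfo
		cursor *pilorama.Cursor // nil means start from the beginning
	)
	for {
		page, next, err := sh.TreeSortedByFilename(ctx, cnr, treeID, node, cursor, 100)
		if err != nil {
			return nil, err
		}
		result = append(result, page...)
		if next == nil || len(page) == 0 {
			return result, nil
		}
		cursor = next
	}
}
```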
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), @@ -204,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID if s.info.Mode.NoMetabase() { return nil, last, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, last, err + } + defer release() return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) } @@ -229,6 +296,11 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return pilorama.Move{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Move{}, err + } + defer release() return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height) } @@ -253,6 +325,11 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeDrop(ctx, cid, treeID) } @@ -276,6 +353,11 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeList(ctx, cid) } @@ -299,6 +381,11 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u if s.pilorama == nil { return 0, ErrPiloramaDisabled } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeHeight(ctx, cid, treeID) } @@ -323,6 +410,11 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b if s.info.Mode.NoMetabase() { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() return s.pilorama.TreeExists(ctx, cid, treeID) } @@ -351,6 +443,11 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height) } @@ -375,6 +472,11 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st if s.info.Mode.NoMetabase() { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID) } @@ -396,6 +498,11 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeListTrees(ctx, prm) } @@ -425,5 +532,10 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin 
if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source) } diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index 05e014d29..9edb89df8 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ -4,11 +4,24 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" ) +var ( + dummyCancel = &writecacheSealCanceler{cancel: func() {}} + notInitializedCancel = &writecacheSealCanceler{cancel: func() {}} + errWriteCacheSealing = errors.New("writecache is already sealing or shard is not initialized") +) + +type writecacheSealCanceler struct { + cancel context.CancelFunc +} + // FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation. type FlushWriteCachePrm struct { ignoreErrors bool @@ -54,11 +67,20 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal) } type SealWriteCachePrm struct { IgnoreErrors bool + Async bool + RestoreMode bool + Shrink bool } // SealWriteCache flushes all data from the write-cache and moves it to degraded read only mode. @@ -67,6 +89,7 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { trace.WithAttributes( attribute.String("shard_id", s.ID().String()), attribute.Bool("ignore_errors", p.IgnoreErrors), + attribute.Bool("restore_mode", p.RestoreMode), )) defer span.End() @@ -74,15 +97,65 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { return errWriteCacheDisabled } + if p.Async { + ctx = context.WithoutCancel(ctx) + } + ctx, cancel := context.WithCancel(ctx) + canceler := &writecacheSealCanceler{cancel: cancel} + if !s.writecacheSealCancel.CompareAndSwap(dummyCancel, canceler) { + return errWriteCacheSealing + } s.m.RLock() - defer s.m.RUnlock() + cleanup := func() { + s.m.RUnlock() + s.writecacheSealCancel.Store(dummyCancel) + } if s.info.Mode.ReadOnly() { + cleanup() return ErrReadOnlyMode } if s.info.Mode.NoMetabase() { + cleanup() return ErrDegradedMode } - return s.writeCache.Seal(ctx, p.IgnoreErrors) + if !p.Async { + defer cleanup() + } + prm := writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode, Shrink: p.Shrink} + if p.Async { + started := make(chan struct{}) + go func() { + close(started) + defer cleanup() + + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) + return + } + defer release() + + s.log.Info(ctx, logs.StartedWritecacheSealAsync) + if err := s.writeCache.Seal(ctx, prm); err != nil { + s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) + return + } + s.log.Info(ctx, logs.WritecacheSealCompletedAsync) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-started: + return nil + } + } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + + return 
s.writeCache.Seal(ctx, prm) } diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go index c1c0e88b3..fd85b4501 100644 --- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go +++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go @@ -2,6 +2,7 @@ package benchmark import ( "context" + "fmt" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -27,15 +28,33 @@ func BenchmarkWritecachePar(b *testing.B) { }) } +func BenchmarkWriteAfterDelete(b *testing.B) { + const payloadSize = 32 << 10 + const parallel = 25 + + cache := newCache(b) + benchmarkPutPrepare(b, cache) + b.Run(fmt.Sprintf("%dB_before", payloadSize), func(b *testing.B) { + b.SetParallelism(parallel) + benchmarkRunPar(b, cache, payloadSize) + }) + require.NoError(b, cache.Flush(context.Background(), false, false)) + b.Run(fmt.Sprintf("%dB_after", payloadSize), func(b *testing.B) { + b.SetParallelism(parallel) + benchmarkRunPar(b, cache, payloadSize) + }) + require.NoError(b, cache.Close(context.Background())) +} + func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) { benchmarkPutPrepare(b, cache) - defer func() { require.NoError(b, cache.Close()) }() + defer func() { require.NoError(b, cache.Close(context.Background())) }() ctx := context.Background() objGen := testutil.RandObjGenerator{ObjSize: size} b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { obj := objGen.Next() rawData, err := obj.Marshal() require.NoError(b, err, "marshaling object") @@ -52,8 +71,12 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) { func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) { benchmarkPutPrepare(b, cache) - defer func() { require.NoError(b, cache.Close()) }() + defer func() { require.NoError(b, cache.Close(context.Background())) }() + benchmarkRunPar(b, cache, size) +} + +func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) { ctx := context.Background() b.ResetTimer() @@ -77,7 +100,7 @@ func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) { func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) { require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening") - require.NoError(b, cache.Init(), "initializing") + require.NoError(b, cache.Init(context.Background()), "initializing") } type testMetabase struct{} @@ -95,6 +118,5 @@ func newCache(b *testing.B) writecache.Cache { writecache.WithBlobstor(bs), writecache.WithMetabase(testMetabase{}), writecache.WithMaxCacheSize(256<<30), - writecache.WithSmallObjectSize(128<<10), ) } diff --git a/pkg/local_object_storage/writecache/cachebbolt.go b/pkg/local_object_storage/writecache/cache.go similarity index 65% rename from pkg/local_object_storage/writecache/cachebbolt.go rename to pkg/local_object_storage/writecache/cache.go index cdd4ed442..ee709ea73 100644 --- a/pkg/local_object_storage/writecache/cachebbolt.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -2,81 +2,67 @@ package writecache import ( "context" - "os" + "fmt" "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.etcd.io/bbolt" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" ) type cache struct { options - // mtx protects statistics, counters and compressFlags. - mtx sync.RWMutex - mode mode.Mode modeMtx sync.RWMutex - // compressFlags maps address of a big object to boolean value indicating - // whether object should be compressed. - compressFlags map[string]struct{} - // flushCh is a channel with objects to flush. flushCh chan objectInfo // cancel is cancel function, protected by modeMtx in Close. cancel atomic.Value // wg is a wait group for flush workers. wg sync.WaitGroup - // store contains underlying database. - store // fsTree contains big files stored directly on file-system. fsTree *fstree.FSTree + // counter contains atomic counters for the number of objects stored in cache. + counter *fstree.SimpleCounter } // wcStorageType is used for write-cache operations logging. const wcStorageType = "write-cache" type objectInfo struct { - addr string - data []byte - obj *objectSDK.Object + addr oid.Address + size uint64 } const ( - defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB - defaultSmallObjectSize = 32 * 1024 // 32 KiB - defaultMaxCacheSize = 1 << 30 // 1 GiB + defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB + defaultMaxCacheSize = 1 << 30 // 1 GiB ) -var ( - defaultBucket = []byte{0} - dummyCanceler context.CancelFunc = func() {} -) +var dummyCanceler context.CancelFunc = func() {} // New creates new writecache instance. func New(opts ...Option) Cache { c := &cache{ flushCh: make(chan objectInfo), mode: mode.Disabled, + counter: fstree.NewSimpleCounter(), - compressFlags: make(map[string]struct{}), options: options{ - log: &logger.Logger{Logger: zap.NewNop()}, - maxObjectSize: defaultMaxObjectSize, - smallObjectSize: defaultSmallObjectSize, - workersCount: defaultFlushWorkersCount, - maxCacheSize: defaultMaxCacheSize, - maxBatchSize: bbolt.DefaultMaxBatchSize, - maxBatchDelay: bbolt.DefaultMaxBatchDelay, - openFile: os.OpenFile, - metrics: DefaultMetrics(), + log: logger.NewLoggerWrapper(zap.NewNop()), + maxObjectSize: defaultMaxObjectSize, + workersCount: defaultFlushWorkersCount, + maxCacheSize: defaultMaxCacheSize, + metrics: DefaultMetrics(), + flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize, + qosLimiter: qos.NewNoopLimiter(), }, } @@ -110,21 +96,24 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error { if err != nil { return metaerr.Wrap(err) } - - return metaerr.Wrap(c.initCounters()) + c.initCounters() + return nil } // Init runs necessary services. -func (c *cache) Init() error { +func (c *cache) Init(ctx context.Context) error { c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode)) - ctx, cancel := context.WithCancel(context.Background()) + if err := c.flushAndDropBBoltDB(ctx); err != nil { + return fmt.Errorf("flush previous version write-cache database: %w", err) + } + ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache c.cancel.Store(cancel) c.runFlushLoop(ctx) return nil } // Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op. 
-func (c *cache) Close() error { +func (c *cache) Close(ctx context.Context) error { if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil { cancelValue.(context.CancelFunc)() } @@ -140,10 +129,10 @@ func (c *cache) Close() error { defer c.modeMtx.Unlock() var err error - if c.db != nil { - err = c.db.Close() + if c.fsTree != nil { + err = c.fsTree.Close(ctx) if err != nil { - c.db = nil + c.fsTree = nil } } c.metrics.Close() diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go index b1a0511ee..94a0a40db 100644 --- a/pkg/local_object_storage/writecache/delete.go +++ b/pkg/local_object_storage/writecache/delete.go @@ -2,7 +2,6 @@ package writecache import ( "context" - "math" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -10,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -45,46 +43,11 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error { return ErrDegraded } - saddr := addr.EncodeToString() - - var dataSize int - _ = c.db.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - dataSize = len(b.Get([]byte(saddr))) - return nil - }) - - if dataSize > 0 { - storageType = StorageTypeDB - var recordDeleted bool - err := c.db.Update(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - key := []byte(saddr) - recordDeleted = b.Get(key) != nil - err := b.Delete(key) - return err - }) - if err != nil { - return err - } - storagelog.Write(c.log, - storagelog.AddressField(saddr), - storagelog.StorageTypeField(wcStorageType), - storagelog.OpField("db DELETE"), - ) - if recordDeleted { - c.objCounters.cDB.Add(math.MaxUint64) - c.estimateCacheSize() - } - deleted = true - return nil - } - storageType = StorageTypeFSTree _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr}) if err == nil { - storagelog.Write(c.log, - storagelog.AddressField(saddr), + storagelog.Write(ctx, c.log, + storagelog.AddressField(addr.EncodeToString()), storagelog.StorageTypeField(wcStorageType), storagelog.OpField("fstree DELETE"), ) diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index da7feda9a..893d27ba2 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -1,37 +1,33 @@ package writecache import ( - "bytes" "context" "errors" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/mr-tron/base58" - "go.etcd.io/bbolt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) const ( - // flushBatchSize is amount of keys which will be read from cache to be flushed - // to the main storage. It is used to reduce contention between cache put - // and cache persist. - flushBatchSize = 512 // defaultFlushWorkersCount is number of workers for putting objects in main storage. defaultFlushWorkersCount = 20 // defaultFlushInterval is default time interval between successive flushes. - defaultFlushInterval = time.Second + defaultFlushInterval = 10 * time.Second ) var errIterationCompleted = errors.New("iteration completed") @@ -41,139 +37,119 @@ func (c *cache) runFlushLoop(ctx context.Context) { if c.disableBackgroundFlush { return } - for i := 0; i < c.workersCount; i++ { - c.wg.Add(1) - go c.workerFlushSmall(ctx) - } - - c.wg.Add(1) - go func() { - c.workerFlushBig(ctx) - c.wg.Done() - }() - + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String()) + fl := newFlushLimiter(c.flushSizeLimit) c.wg.Add(1) go func() { defer c.wg.Done() - - tt := time.NewTimer(defaultFlushInterval) - defer tt.Stop() - - for { - select { - case <-tt.C: - c.flushSmallObjects(ctx) - tt.Reset(defaultFlushInterval) - c.estimateCacheSize() - case <-ctx.Done(): - return - } - } + c.pushToFlushQueue(ctx, fl) }() -} -func (c *cache) flushSmallObjects(ctx context.Context) { - var lastKey []byte - for { - select { - case <-ctx.Done(): - return - default: - } - - var m []objectInfo - - c.modeMtx.RLock() - if c.readOnly() { - c.modeMtx.RUnlock() - time.Sleep(time.Second) - continue - } - - // We put objects in batches of fixed size to not interfere with main put cycle a lot. 
- _ = c.db.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - cs := b.Cursor() - - var k, v []byte - - if len(lastKey) == 0 { - k, v = cs.First() - } else { - k, v = cs.Seek(lastKey) - if bytes.Equal(k, lastKey) { - k, v = cs.Next() - } - } - - for ; k != nil && len(m) < flushBatchSize; k, v = cs.Next() { - if len(lastKey) == len(k) { - copy(lastKey, k) - } else { - lastKey = bytes.Clone(k) - } - - m = append(m, objectInfo{ - addr: string(k), - data: bytes.Clone(v), - }) - } - return nil - }) - - var count int - for i := range m { - obj := objectSDK.New() - if err := obj.Unmarshal(m[i].data); err != nil { - continue - } - m[i].obj = obj - - count++ - select { - case c.flushCh <- m[i]: - case <-ctx.Done(): - c.modeMtx.RUnlock() - return - } - } - - c.modeMtx.RUnlock() - if count == 0 { - break - } - - c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache, - zap.Int("count", count), - zap.String("start", base58.Encode(lastKey))) + for range c.workersCount { + c.wg.Add(1) + go c.workerFlush(ctx, fl) } } -func (c *cache) workerFlushBig(ctx context.Context) { - tick := time.NewTicker(defaultFlushInterval * 10) +func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { + stopf := context.AfterFunc(ctx, func() { + fl.close() + }) + defer stopf() + + tick := time.NewTicker(defaultFlushInterval) for { select { case <-tick.C: c.modeMtx.RLock() if c.readOnly() || c.noMetabase() { c.modeMtx.RUnlock() - break + continue } - _ = c.flushFSTree(ctx, true) + release, err := c.qosLimiter.ReadRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err)) + c.modeMtx.RUnlock() + continue + } + err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { + if err := fl.acquire(oi.DataSize); err != nil { + return err + } + select { + case c.flushCh <- objectInfo{ + addr: oi.Address, + size: oi.DataSize, + }: + return nil + case <-ctx.Done(): + fl.release(oi.DataSize) + return ctx.Err() + } + }) + release() + if err != nil { + c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) + } c.modeMtx.RUnlock() + + // counter changed by fstree + c.estimateCacheSize() case <-ctx.Done(): return } } } -func (c *cache) reportFlushError(msg string, addr string, err error) { +func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) { + defer c.wg.Done() + + var objInfo objectInfo + for { + select { + case objInfo = <-c.flushCh: + c.flushIfAnObjectExistsWorker(ctx, objInfo, fl) + case <-ctx.Done(): + return + } + } +} + +func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) { + defer fl.release(objInfo.size) + + release, err := c.qosLimiter.WriteRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err)) + return + } + defer release() + res, err := c.fsTree.Get(ctx, common.GetPrm{ + Address: objInfo.addr, + }) + if err != nil { + if !client.IsErrObjectNotFound(err) { + c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err)) + } + return + } + + err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree) + if err != nil { + // Error is handled in flushObject. 
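+		// The object stays in the write-cache; the next pushToFlushQueue
+		// iteration will enqueue it again and the flush will be retried.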
+ return + } + + c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData))) +} + +func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) { if c.reportError != nil { - c.reportError(msg, err) + c.reportError(ctx, msg, err) } else { - c.log.Error(msg, + c.log.Error(ctx, msg, zap.String("address", addr), zap.Error(err)) } @@ -188,7 +164,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error { var obj objectSDK.Object err := obj.Unmarshal(e.ObjectData) if err != nil { - c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err)) + c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err)) if ignoreErrors { return nil } @@ -197,13 +173,10 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error { err = c.flushObject(ctx, &obj, e.ObjectData, StorageTypeFSTree) if err != nil { - if ignoreErrors { - return nil - } return err } - c.deleteFromDisk(ctx, e.Address) + c.deleteFromDisk(ctx, e.Address, uint64(len(e.ObjectData))) return nil } @@ -211,29 +184,6 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error { return err } -// workerFlushSmall writes small objects to the main storage. -func (c *cache) workerFlushSmall(ctx context.Context) { - defer c.wg.Done() - - var objInfo objectInfo - for { - // Give priority to direct put. - select { - case objInfo = <-c.flushCh: - case <-ctx.Done(): - return - } - - err := c.flushObject(ctx, objInfo.obj, objInfo.data, StorageTypeDB) - if err != nil { - // Error is handled in flushObject. - continue - } - - c.deleteFromDB(objInfo.addr, true) - } -} - // flushObject is used to write object directly to the main storage. func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st StorageType) error { var err error @@ -252,7 +202,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b if err != nil { if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) && !errors.Is(err, blobstor.ErrNoPlaceFound) { - c.reportFlushError(logs.FSTreeCantFushObjectBlobstor, + c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor, addr.EncodeToString(), err) } return err @@ -264,7 +214,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b _, err = c.metabase.UpdateStorageID(ctx, updPrm) if err != nil { - c.reportFlushError(logs.FSTreeCantUpdateID, + c.reportFlushError(ctx, logs.FSTreeCantUpdateID, addr.EncodeToString(), err) } return err @@ -291,7 +241,7 @@ func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error { if seal { m := c.mode | mode.ReadOnly - if err := c.setMode(ctx, m, ignoreErrors); err != nil { + if err := c.setMode(ctx, m, setModePrm{ignoreErrors: ignoreErrors}); err != nil { return err } c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m)) @@ -300,74 +250,5 @@ func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error { } func (c *cache) flush(ctx context.Context, ignoreErrors bool) error { - if err := c.flushFSTree(ctx, ignoreErrors); err != nil { - return err - } - - var last string - for { - batch, err := c.readNextDBBatch(ignoreErrors, last) - if err != nil { - return err - } - if len(batch) == 0 { - break - } - for _, item := range batch { - var obj objectSDK.Object - if err := obj.Unmarshal(item.data); err != nil { - c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, item.address, metaerr.Wrap(err)) - if ignoreErrors { - continue - } - return err - } - - if err := 
c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil { - return err - } - c.deleteFromDB(item.address, false) - } - last = batch[len(batch)-1].address - } - return nil -} - -type batchItem struct { - data []byte - address string -} - -func (c *cache) readNextDBBatch(ignoreErrors bool, last string) ([]batchItem, error) { - const batchSize = 100 - var batch []batchItem - err := c.db.View(func(tx *bbolt.Tx) error { - var addr oid.Address - - b := tx.Bucket(defaultBucket) - cs := b.Cursor() - for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() { - sa := string(k) - if sa == last { - continue - } - if err := addr.DecodeString(sa); err != nil { - c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, sa, metaerr.Wrap(err)) - if ignoreErrors { - continue - } - return err - } - - batch = append(batch, batchItem{data: bytes.Clone(data), address: sa}) - if len(batch) == batchSize { - return errIterationCompleted - } - } - return nil - }) - if err == nil || errors.Is(err, errIterationCompleted) { - return batch, nil - } - return nil, err + return c.flushFSTree(ctx, ignoreErrors) } diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go index 3c951bebe..7fc84657c 100644 --- a/pkg/local_object_storage/writecache/flush_test.go +++ b/pkg/local_object_storage/writecache/flush_test.go @@ -19,19 +19,17 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" "go.uber.org/zap" ) func TestFlush(t *testing.T) { testlogger := test.NewLogger(t) - createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs MainStorage, opts ...Option) Cache { + createCacheFn := func(t *testing.T, mb *meta.DB, bs MainStorage, opts ...Option) Cache { return New( append([]Option{ WithLogger(testlogger), WithPath(filepath.Join(t.TempDir(), "writecache")), - WithSmallObjectSize(smallSize), WithMetabase(mb), WithBlobstor(bs), WithDisableBackgroundFlush(), @@ -40,38 +38,13 @@ func TestFlush(t *testing.T) { errCountOpt := func() (Option, *atomic.Uint32) { cnt := &atomic.Uint32{} - return WithReportErrorFunc(func(msg string, err error) { + return WithReportErrorFunc(func(ctx context.Context, msg string, err error) { cnt.Add(1) - testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err)) + testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err)) }), cnt } failures := []TestFailureInjector[Option]{ - { - Desc: "db, invalid address", - InjectFn: func(t *testing.T, wc Cache) { - c := wc.(*cache) - obj := testutil.GenerateObject() - data, err := obj.Marshal() - require.NoError(t, err) - require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - return b.Put([]byte{1, 2, 3}, data) - })) - }, - }, - { - Desc: "db, invalid object", - InjectFn: func(t *testing.T, wc Cache) { - c := wc.(*cache) - require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - k := []byte(oidtest.Address().EncodeToString()) - v := []byte{1, 2, 3} - return b.Put(k, v) - })) - }, - }, { Desc: "fs, read error", InjectFn: func(t *testing.T, wc Cache) { @@ -118,7 +91,6 @@ const ( type CreateCacheFunc[Option any] func( t *testing.T, - smallSize uint64, meta *meta.DB, bs MainStorage, opts ...Option, @@ -141,12 +113,12 @@ func runFlushTest[Option any]( failures ...TestFailureInjector[Option], ) { t.Run("no errors", func(t *testing.T) { - 
wc, bs, mb := newCache(t, createCacheFn, smallSize) - defer func() { require.NoError(t, wc.Close()) }() + wc, bs, mb := newCache(t, createCacheFn) + defer func() { require.NoError(t, wc.Close(context.Background())) }() objects := putObjects(t, wc) - require.NoError(t, bs.SetMode(mode.ReadWrite)) - require.NoError(t, mb.SetMode(mode.ReadWrite)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) require.NoError(t, wc.Flush(context.Background(), false, false)) @@ -154,32 +126,31 @@ func runFlushTest[Option any]( }) t.Run("flush on moving to degraded mode", func(t *testing.T) { - wc, bs, mb := newCache(t, createCacheFn, smallSize) - defer func() { require.NoError(t, wc.Close()) }() + wc, bs, mb := newCache(t, createCacheFn) + defer func() { require.NoError(t, wc.Close(context.Background())) }() objects := putObjects(t, wc) // Blobstor is read-only, so we expect an error from `flush` here. - require.Error(t, wc.SetMode(mode.Degraded)) + require.Error(t, wc.SetMode(context.Background(), mode.Degraded)) - require.NoError(t, bs.SetMode(mode.ReadWrite)) - require.NoError(t, mb.SetMode(mode.ReadWrite)) - require.NoError(t, wc.SetMode(mode.Degraded)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, wc.SetMode(context.Background(), mode.Degraded)) check(t, mb, bs, objects) }) t.Run("ignore errors", func(t *testing.T) { for _, f := range failures { - f := f t.Run(f.Desc, func(t *testing.T) { errCountOpt, errCount := errCountOption() - wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt) - defer func() { require.NoError(t, wc.Close()) }() + wc, bs, mb := newCache(t, createCacheFn, errCountOpt) + defer func() { require.NoError(t, wc.Close(context.Background())) }() objects := putObjects(t, wc) f.InjectFn(t, wc) - require.NoError(t, bs.SetMode(mode.ReadWrite)) - require.NoError(t, mb.SetMode(mode.ReadWrite)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) require.Equal(t, uint32(0), errCount.Load()) require.Error(t, wc.Flush(context.Background(), false, false)) @@ -195,7 +166,6 @@ func runFlushTest[Option any]( func newCache[Option any]( t *testing.T, createCacheFn CreateCacheFunc[Option], - smallSize uint64, opts ...Option, ) (Cache, *blobstor.BlobStor, *meta.DB) { dir := t.TempDir() mb := meta.New( meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(dummyEpoch{})) require.NoError(t, mb.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, mb.Init()) + require.NoError(t, mb.Init(context.Background())) bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{ { Storage: fstree.New( fstree.WithPath(filepath.Join(dir, "blob")), fstree.WithDepth(0), fstree.WithDirNameLen(1)), }, })) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) - wc := createCacheFn(t, smallSize, mb, bs, opts...) + wc := createCacheFn(t, mb, bs, opts...) require.NoError(t, wc.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, wc.Init()) + require.NoError(t, wc.Init(context.Background())) // First set mode for metabase and blobstor to prevent background flushes.
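// Keeping the metabase and blobstor read-only means nothing can be flushed
// out of the write-cache behind the test's back; the objects just written
// stay in place until the test flips the modes back to read-write.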
- require.NoError(t, mb.SetMode(mode.ReadOnly)) - require.NoError(t, bs.SetMode(mode.ReadOnly)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly)) return wc, bs, mb } @@ -264,7 +234,7 @@ func check(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPai prm.StorageID = mRes.StorageID() res, err := bs.Get(context.Background(), prm) - require.NoError(t, err) + require.NoError(t, err, objects[i].addr) require.Equal(t, objects[i].obj, res.Object) } } diff --git a/pkg/local_object_storage/writecache/get.go b/pkg/local_object_storage/writecache/get.go index bf26833bd..c0847a65f 100644 --- a/pkg/local_object_storage/writecache/get.go +++ b/pkg/local_object_storage/writecache/get.go @@ -37,11 +37,11 @@ func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, e return nil, ErrDegraded } - obj, err := c.getInternal(ctx, saddr, addr) + obj, err := c.getInternal(ctx, addr) return obj, metaerr.Wrap(err) } -func (c *cache) getInternal(ctx context.Context, saddr string, addr oid.Address) (*objectSDK.Object, error) { +func (c *cache) getInternal(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { found := false storageType := StorageTypeUndefined startedAt := time.Now() @@ -49,14 +49,6 @@ func (c *cache) getInternal(ctx context.Context, saddr string, addr oid.Address) c.metrics.Get(time.Since(startedAt), found, storageType) }() - value, err := Get(c.db, []byte(saddr)) - if err == nil { - obj := objectSDK.New() - found = true - storageType = StorageTypeDB - return obj, obj.Unmarshal(value) - } - res, err := c.fsTree.Get(ctx, common.GetPrm{Address: addr}) if err != nil { return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) @@ -87,7 +79,7 @@ func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, return nil, ErrDegraded } - obj, err := c.getInternal(ctx, saddr, addr) + obj, err := c.getInternal(ctx, addr) if err != nil { return nil, metaerr.Wrap(err) } diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go index 9ec039f91..e369fbd50 100644 --- a/pkg/local_object_storage/writecache/iterate.go +++ b/pkg/local_object_storage/writecache/iterate.go @@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error { return b.ForEach(func(k, _ []byte) error { err := addr.DecodeString(string(k)) if err != nil { - return fmt.Errorf("could not parse object address: %w", err) + return fmt.Errorf("parse object address: %w", err) } return f(addr) diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go new file mode 100644 index 000000000..0e020b36e --- /dev/null +++ b/pkg/local_object_storage/writecache/limiter.go @@ -0,0 +1,66 @@ +package writecache + +import ( + "errors" + "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" +) + +var errLimiterClosed = errors.New("acquire failed: limiter closed") + +// flushLimiter is used to limit the total size of objects +// being flushed to blobstore at the same time. This is a necessary +// limitation so that the flushing process does not have +// a strong impact on user requests. 
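+// A hedged usage sketch (the budget value is illustrative):
+//
+//	fl := newFlushLimiter(128 << 20) // at most ~128 MiB of in-flight flushes
+//	if err := fl.acquire(objSize); err != nil {
+//		return err // the limiter was closed
+//	}
+//	defer fl.release(objSize)
+//	// ... write the object to the main storage ...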
+type flushLimiter struct { + count, size uint64 + maxSize uint64 + cond *sync.Cond + closed bool +} + +func newFlushLimiter(maxSize uint64) *flushLimiter { + return &flushLimiter{ + maxSize: maxSize, + cond: sync.NewCond(&sync.Mutex{}), + } +} + +func (l *flushLimiter) acquire(size uint64) error { + l.cond.L.Lock() + defer l.cond.L.Unlock() + + // it is allowed to overflow maxSize to allow flushing objects with size > maxSize + for l.count > 0 && l.size+size > l.maxSize && !l.closed { + l.cond.Wait() + if l.closed { + return errLimiterClosed + } + } + l.count++ + l.size += size + return nil +} + +func (l *flushLimiter) release(size uint64) { + l.cond.L.Lock() + defer l.cond.L.Unlock() + + assert.True(l.size >= size, "flushLimiter: invalid size") + l.size -= size + + assert.True(l.count > 0, "flushLimiter: invalid count") + l.count-- + + l.cond.Broadcast() +} + +func (l *flushLimiter) close() { + l.cond.L.Lock() + defer l.cond.L.Unlock() + + l.closed = true + + l.cond.Broadcast() +} diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go new file mode 100644 index 000000000..1ca3e1156 --- /dev/null +++ b/pkg/local_object_storage/writecache/limiter_test.go @@ -0,0 +1,27 @@ +package writecache + +import ( + "sync/atomic" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +func TestLimiter(t *testing.T) { + var maxSize uint64 = 10 + var single uint64 = 3 + l := newFlushLimiter(uint64(maxSize)) + var currSize atomic.Int64 + var eg errgroup.Group + for range 10_000 { + eg.Go(func() error { + defer l.release(single) + defer currSize.Add(-1) + l.acquire(single) + require.True(t, currSize.Add(1) <= 3) + return nil + }) + } + require.NoError(t, eg.Wait()) +} diff --git a/pkg/local_object_storage/writecache/metrics.go b/pkg/local_object_storage/writecache/metrics.go index e68b6d8be..e3641f85e 100644 --- a/pkg/local_object_storage/writecache/metrics.go +++ b/pkg/local_object_storage/writecache/metrics.go @@ -26,9 +26,9 @@ type Metrics interface { Flush(success bool, st StorageType) Evict(st StorageType) - SetEstimateSize(db, fstree uint64) + SetEstimateSize(uint64) SetMode(m mode.ComponentMode) - SetActualCounters(db, fstree uint64) + SetActualCounters(uint64) SetPath(path string) Close() } @@ -47,11 +47,11 @@ func (metricsStub) Delete(time.Duration, bool, StorageType) {} func (metricsStub) Put(time.Duration, bool, StorageType) {} -func (metricsStub) SetEstimateSize(uint64, uint64) {} +func (metricsStub) SetEstimateSize(uint64) {} func (metricsStub) SetMode(mode.ComponentMode) {} -func (metricsStub) SetActualCounters(uint64, uint64) {} +func (metricsStub) SetActualCounters(uint64) {} func (metricsStub) Flush(bool, StorageType) {} diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index 4172cfbc8..c491be60b 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -2,21 +2,29 @@ package writecache import ( "context" + "errors" "fmt" + "os" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) +type setModePrm struct { + ignoreErrors bool + shrink bool +} + // SetMode sets write-cache mode 
of operation. // When shard is put in read-only mode all objects in memory are flushed to disk // and all background jobs are suspended. -func (c *cache) SetMode(m mode.Mode) error { - ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode", +func (c *cache) SetMode(ctx context.Context, m mode.Mode) error { + ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode", trace.WithAttributes( attribute.String("mode", m.String()), )) @@ -25,7 +33,7 @@ func (c *cache) SetMode(m mode.Mode) error { c.modeMtx.Lock() defer c.modeMtx.Unlock() - err := c.setMode(ctx, m, true) + err := c.setMode(ctx, m, setModePrm{ignoreErrors: true}) if err == nil { c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m)) } @@ -33,28 +41,26 @@ func (c *cache) SetMode(m mode.Mode) error { } // setMode applies new mode. Must be called with cache.modeMtx lock taken. -func (c *cache) setMode(ctx context.Context, m mode.Mode, ignoreErrors bool) error { +func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error { var err error turnOffMeta := m.NoMetabase() if turnOffMeta && !c.mode.NoMetabase() { - err = c.flush(ctx, ignoreErrors) + err = c.flush(ctx, prm.ignoreErrors) if err != nil { return err } } - if c.db != nil { - if err = c.db.Close(); err != nil { - return fmt.Errorf("can't close write-cache database: %w", err) - } + if err := c.closeStorage(ctx, prm.shrink); err != nil { + return err } // Suspend producers to ensure there are channel send operations in fly. // flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty // guarantees that there are no in-fly operations. for len(c.flushCh) != 0 { - c.log.Info(logs.WritecacheWaitingForChannelsToFlush) + c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush) time.Sleep(time.Second) } @@ -71,6 +77,44 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, ignoreErrors bool) err return nil } +func (c *cache) closeStorage(ctx context.Context, shrink bool) error { + if c.fsTree == nil { + return nil + } + if !shrink { + if err := c.fsTree.Close(ctx); err != nil { + return fmt.Errorf("close write-cache storage: %w", err) + } + return nil + } + + empty := true + _, err := c.fsTree.Iterate(ctx, common.IteratePrm{ + Handler: func(common.IterationElement) error { + return errIterationCompleted + }, + }) + if err != nil { + if errors.Is(err, errIterationCompleted) { + empty = false + } else { + return fmt.Errorf("check write-cache items: %w", err) + } + } + if err := c.fsTree.Close(ctx); err != nil { + return fmt.Errorf("close write-cache storage: %w", err) + } + if empty { + err := os.RemoveAll(c.path) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("remove write-cache files: %w", err) + } + } else { + c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty) + } + return nil +} + // readOnly returns true if current mode is read-only. // `c.modeMtx` must be taken. 
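// A typical guarded call site under that lock looks like (sketch):
//
//	c.modeMtx.RLock()
//	defer c.modeMtx.RUnlock()
//	if c.readOnly() {
//		return ErrReadOnly
//	}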
func (c *cache) readOnly() bool { diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go index f684c15bc..4fbadbc64 100644 --- a/pkg/local_object_storage/writecache/mode_test.go +++ b/pkg/local_object_storage/writecache/mode_test.go @@ -17,14 +17,14 @@ func TestMode(t *testing.T) { WithPath(t.TempDir())) require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly)) - require.Nil(t, wc.(*cache).db) - require.NoError(t, wc.Init()) - require.Nil(t, wc.(*cache).db) - require.NoError(t, wc.Close()) + require.Nil(t, wc.(*cache).fsTree) + require.NoError(t, wc.Init(context.Background())) + require.Nil(t, wc.(*cache).fsTree) + require.NoError(t, wc.Close(context.Background())) require.NoError(t, wc.Open(context.Background(), mode.Degraded)) - require.Nil(t, wc.(*cache).db) - require.NoError(t, wc.Init()) - require.Nil(t, wc.(*cache).db) - require.NoError(t, wc.Close()) + require.Nil(t, wc.(*cache).fsTree) + require.NoError(t, wc.Init(context.Background())) + require.Nil(t, wc.(*cache).fsTree) + require.NoError(t, wc.Close(context.Background())) } diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index c8eb1bc45..a4f98ad06 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -1,12 +1,10 @@ package writecache import ( - "io/fs" - "os" - "time" + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) // Option represents write-cache configuration option. @@ -22,35 +20,32 @@ type options struct { metabase Metabase // maxObjectSize is the maximum size of the object stored in the write-cache. maxObjectSize uint64 - // smallObjectSize is the maximum size of the object stored in the database. - smallObjectSize uint64 // workersCount is the number of workers flushing objects in parallel. workersCount int // maxCacheSize is the maximum total size of all objects saved in cache (DB + FS). // 1 GiB by default. maxCacheSize uint64 - // objCounters contains atomic counters for the number of objects stored in cache. - objCounters counters - // maxBatchSize is the maximum batch size for the small object database. - maxBatchSize int - // maxBatchDelay is the maximum batch wait time for the small object database. - maxBatchDelay time.Duration + // maxCacheCount is the maximum total count of all object saved in cache. + // 0 (no limit) by default. + maxCacheCount uint64 // noSync is true iff FSTree allows unsynchronized writes. noSync bool // reportError is the function called when encountering disk errors in background workers. - reportError func(string, error) - // openFile is the function called internally by bbolt to open database files. Useful for hermetic testing. - openFile func(string, int, fs.FileMode) (*os.File, error) + reportError func(context.Context, string, error) // metrics is metrics implementation metrics Metrics // disableBackgroundFlush is for testing purposes only. disableBackgroundFlush bool + // flushSizeLimit is total size of flushing objects. + flushSizeLimit uint64 + // qosLimiter used to limit flush RPS. + qosLimiter qos.Limiter } // WithLogger sets logger. 
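// A hedged construction sketch wiring several of the options from this file
// together (the path and size values are illustrative):
//
//	wc := New(
//		WithLogger(log),
//		WithPath("/var/lib/frostfs/writecache"),
//		WithMaxCacheSize(1<<30),     // 1 GiB total cache budget
//		WithFlushSizeLimit(128<<20), // cap bytes being flushed concurrently
//	)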
func WithLogger(log *logger.Logger) Option { return func(o *options) { - o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))} + o.log = log } } @@ -84,15 +79,6 @@ func WithMaxObjectSize(sz uint64) Option { } } -// WithSmallObjectSize sets maximum object size to be stored in write-cache. -func WithSmallObjectSize(sz uint64) Option { - return func(o *options) { - if sz > 0 { - o.smallObjectSize = sz - } - } -} - func WithFlushWorkersCount(c int) Option { return func(o *options) { if c > 0 { @@ -108,21 +94,10 @@ func WithMaxCacheSize(sz uint64) Option { } } -// WithMaxBatchSize sets max batch size for the small object database. -func WithMaxBatchSize(sz int) Option { +// WithMaxCacheCount sets maximum write-cache objects count. +func WithMaxCacheCount(v uint64) Option { return func(o *options) { - if sz > 0 { - o.maxBatchSize = sz - } - } -} - -// WithMaxBatchDelay sets max batch delay for the small object database. -func WithMaxBatchDelay(d time.Duration) Option { - return func(o *options) { - if d > 0 { - o.maxBatchDelay = d - } + o.maxCacheCount = v } } @@ -137,19 +112,12 @@ func WithNoSync(noSync bool) Option { } // WithReportErrorFunc sets error reporting function. -func WithReportErrorFunc(f func(string, error)) Option { +func WithReportErrorFunc(f func(context.Context, string, error)) Option { return func(o *options) { o.reportError = f } } -// WithOpenFile sets the OpenFile function to use internally by bolt. Useful for hermetic testing. -func WithOpenFile(f func(string, int, fs.FileMode) (*os.File, error)) Option { - return func(o *options) { - o.openFile = f - } -} - // WithMetrics sets metrics implementation. func WithMetrics(metrics Metrics) Option { return func(o *options) { @@ -163,3 +131,16 @@ func WithDisableBackgroundFlush() Option { o.disableBackgroundFlush = true } } + +// WithFlushSizeLimit sets flush size limit. +func WithFlushSizeLimit(v uint64) Option { + return func(o *options) { + o.flushSizeLimit = v + } +} + +func WithQoSLimiter(l qos.Limiter) Option { + return func(o *options) { + o.qosLimiter = l + } +} diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go index 0e419f95b..2fbf50913 100644 --- a/pkg/local_object_storage/writecache/put.go +++ b/pkg/local_object_storage/writecache/put.go @@ -2,13 +2,13 @@ package writecache import ( "context" + "fmt" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.etcd.io/bbolt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -50,65 +50,25 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro return common.PutRes{}, ErrBigObject } - oi := objectInfo{ - addr: prm.Address.EncodeToString(), - obj: prm.Object, - data: prm.RawData, - } - - if sz <= c.smallObjectSize { - storageType = StorageTypeDB - err := c.putSmall(oi) - if err == nil { - added = true - } - return common.PutRes{}, err - } - storageType = StorageTypeFSTree - err := c.putBig(ctx, oi.addr, prm) + err := c.putBig(ctx, prm) if err == nil { added = true } return common.PutRes{}, metaerr.Wrap(err) } -// putSmall persists small objects to the write-cache database and -// pushes the to the flush workers queue. 
-func (c *cache) putSmall(obj objectInfo) error { - cacheSize := c.estimateCacheSize() - if c.maxCacheSize < c.incSizeDB(cacheSize) { - return ErrOutOfSpace - } - - var newRecord bool - err := c.db.Batch(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - key := []byte(obj.addr) - newRecord = b.Get(key) == nil - if newRecord { - return b.Put(key, obj.data) - } - return nil - }) - if err == nil { - storagelog.Write(c.log, - storagelog.AddressField(obj.addr), - storagelog.StorageTypeField(wcStorageType), - storagelog.OpField("db PUT"), - ) - if newRecord { - c.objCounters.cDB.Add(1) - c.estimateCacheSize() - } - } - return err -} - // putBig writes object to FSTree and pushes it to the flush workers queue. -func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) error { - cacheSz := c.estimateCacheSize() - if c.maxCacheSize < c.incSizeFS(cacheSz) { +func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error { + if prm.RawData == nil { // foolproof: RawData should be marshalled by shard. + data, err := prm.Object.Marshal() + if err != nil { + return fmt.Errorf("cannot marshal object: %w", err) + } + prm.RawData = data + } + size := uint64(len(prm.RawData)) + if !c.hasEnoughSpace(size) { return ErrOutOfSpace } @@ -117,13 +77,8 @@ func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) erro return err } - if compressor := c.blobstor.Compressor(); compressor != nil && compressor.NeedsCompression(prm.Object) { - c.mtx.Lock() - c.compressFlags[addr] = struct{}{} - c.mtx.Unlock() - } - storagelog.Write(c.log, - storagelog.AddressField(addr), + storagelog.Write(ctx, c.log, + storagelog.AddressField(prm.Address.EncodeToString()), storagelog.StorageTypeField(wcStorageType), storagelog.OpField("fstree PUT"), ) diff --git a/pkg/local_object_storage/writecache/seal.go b/pkg/local_object_storage/writecache/seal.go index 48107a75f..fa224f5e0 100644 --- a/pkg/local_object_storage/writecache/seal.go +++ b/pkg/local_object_storage/writecache/seal.go @@ -9,20 +9,29 @@ import ( "go.opentelemetry.io/otel/trace" ) -func (c *cache) Seal(ctx context.Context, ignoreErrors bool) error { +func (c *cache) Seal(ctx context.Context, prm SealPrm) error { ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Seal", trace.WithAttributes( - attribute.Bool("ignore_errors", ignoreErrors), + attribute.Bool("ignore_errors", prm.IgnoreErrors), + attribute.Bool("restore_mode", prm.RestoreMode), )) defer span.End() c.modeMtx.Lock() defer c.modeMtx.Unlock() + sourceMode := c.mode // flush will be done by setMode - err := c.setMode(ctx, mode.DegradedReadOnly, ignoreErrors) - if err == nil { - c.metrics.SetMode(mode.ComponentDisabled) + err := c.setMode(ctx, mode.DegradedReadOnly, setModePrm{ignoreErrors: prm.IgnoreErrors, shrink: prm.Shrink}) + if err != nil { + return err + } + c.metrics.SetMode(mode.ComponentDisabled) + if prm.RestoreMode { + err = c.setMode(ctx, sourceMode, setModePrm{ignoreErrors: prm.IgnoreErrors}) + if err == nil { + c.metrics.SetMode(mode.ConvertToComponentMode(sourceMode)) + } } return err } diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go index bc75aaf27..7a52d3672 100644 --- a/pkg/local_object_storage/writecache/state.go +++ b/pkg/local_object_storage/writecache/state.go @@ -1,77 +1,20 @@ package writecache -import ( - "fmt" - "math" - "sync/atomic" +func (c *cache) estimateCacheSize() (uint64, uint64) { + count, size := c.counter.CountSize() + c.metrics.SetEstimateSize(size) + 
c.metrics.SetActualCounters(count) + return count, size +} - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "go.etcd.io/bbolt" -) - -func (c *cache) estimateCacheSize() uint64 { - dbCount := c.objCounters.DB() - fsCount := c.objCounters.FS() - if fsCount > 0 { - fsCount-- // db file +func (c *cache) hasEnoughSpace(objectSize uint64) bool { + count, size := c.estimateCacheSize() + if c.maxCacheCount > 0 && count+1 > c.maxCacheCount { + return false } - dbSize := dbCount * c.smallObjectSize - fsSize := fsCount * c.maxObjectSize - c.metrics.SetEstimateSize(dbSize, fsSize) - c.metrics.SetActualCounters(dbCount, fsCount) - return dbSize + fsSize + return c.maxCacheSize >= size+objectSize } -func (c *cache) incSizeDB(sz uint64) uint64 { - return sz + c.smallObjectSize -} - -func (c *cache) incSizeFS(sz uint64) uint64 { - return sz + c.maxObjectSize -} - -var _ fstree.FileCounter = &counters{} - -type counters struct { - cDB, cFS atomic.Uint64 -} - -func (x *counters) DB() uint64 { - return x.cDB.Load() -} - -func (x *counters) FS() uint64 { - return x.cFS.Load() -} - -// Set implements fstree.ObjectCounter. -func (x *counters) Set(v uint64) { - x.cFS.Store(v) -} - -// Inc implements fstree.ObjectCounter. -func (x *counters) Inc() { - x.cFS.Add(1) -} - -// Dec implements fstree.ObjectCounter. -func (x *counters) Dec() { - x.cFS.Add(math.MaxUint64) -} - -func (c *cache) initCounters() error { - var inDB uint64 - err := c.db.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - if b != nil { - inDB = uint64(b.Stats().KeyN) - } - return nil - }) - if err != nil { - return fmt.Errorf("could not read write-cache DB counter: %w", err) - } - c.objCounters.cDB.Store(inDB) +func (c *cache) initCounters() { c.estimateCacheSize() - return nil } diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go index caf997af8..e88566cdf 100644 --- a/pkg/local_object_storage/writecache/storage.go +++ b/pkg/local_object_storage/writecache/storage.go @@ -3,7 +3,6 @@ package writecache import ( "context" "fmt" - "math" "os" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -14,101 +13,39 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" "go.uber.org/zap" ) -// store represents persistent storage with in-memory LRU cache -// for flushed items on top of it. 
-type store struct { - db *bbolt.DB -} - -const dbName = "small.bolt" - func (c *cache) openStore(mod mode.ComponentMode) error { err := util.MkdirAllX(c.path, os.ModePerm) if err != nil { return err } - c.db, err = OpenDB(c.path, mod.ReadOnly(), c.openFile) - if err != nil { - return fmt.Errorf("could not open database: %w", err) - } - - c.db.MaxBatchSize = c.maxBatchSize - c.db.MaxBatchDelay = c.maxBatchDelay - - if !mod.ReadOnly() { - err = c.db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(defaultBucket) - return err - }) - if err != nil { - return fmt.Errorf("could not create default bucket: %w", err) - } - } - c.fsTree = fstree.New( fstree.WithPath(c.path), fstree.WithPerm(os.ModePerm), fstree.WithDepth(1), fstree.WithDirNameLen(1), fstree.WithNoSync(c.noSync), - fstree.WithFileCounter(&c.objCounters), + fstree.WithFileCounter(c.counter), ) if err := c.fsTree.Open(mod); err != nil { - return fmt.Errorf("could not open FSTree: %w", err) + return fmt.Errorf("open FSTree: %w", err) } if err := c.fsTree.Init(); err != nil { - return fmt.Errorf("could not init FSTree: %w", err) + return fmt.Errorf("init FSTree: %w", err) } return nil } -func (c *cache) deleteFromDB(key string, batched bool) { - var recordDeleted bool - var err error - if batched { - err = c.db.Batch(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - key := []byte(key) - recordDeleted = b.Get(key) != nil - return b.Delete(key) - }) - } else { - err = c.db.Update(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - key := []byte(key) - recordDeleted = b.Get(key) != nil - return b.Delete(key) - }) - } - - if err == nil { - c.metrics.Evict(StorageTypeDB) - storagelog.Write(c.log, - storagelog.AddressField(key), - storagelog.StorageTypeField(wcStorageType), - storagelog.OpField("db DELETE"), - ) - if recordDeleted { - c.objCounters.cDB.Add(math.MaxUint64) - c.estimateCacheSize() - } - } else { - c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err)) - } -} - -func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address) { - _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr}) +func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) { + _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size}) if err != nil && !client.IsErrObjectNotFound(err) { - c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err)) + c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err)) } else if err == nil { - storagelog.Write(c.log, + storagelog.Write(ctx, c.log, storagelog.AddressField(addr.EncodeToString()), storagelog.StorageTypeField(wcStorageType), storagelog.OpField("fstree DELETE"), diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go new file mode 100644 index 000000000..5eb341ba4 --- /dev/null +++ b/pkg/local_object_storage/writecache/upgrade.go @@ -0,0 +1,110 @@ +package writecache + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "time" + + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.etcd.io/bbolt" +) + +const dbName = "small.bolt" + +var defaultBucket = []byte{0} + +func (c *cache) flushAndDropBBoltDB(ctx context.Context) error { + _, err := os.Stat(filepath.Join(c.path, dbName)) + if err != nil && os.IsNotExist(err) { + return nil + } + if err != nil { + return fmt.Errorf("check 
write-cache database existence: %w", err) + } + db, err := OpenDB(c.path, true, os.OpenFile) + if err != nil { + return fmt.Errorf("open write-cache database: %w", err) + } + defer func() { + _ = db.Close() + }() + + var last string + for { + batch, err := c.readNextDBBatch(db, last) + if err != nil { + return err + } + if len(batch) == 0 { + break + } + for _, item := range batch { + var obj objectSDK.Object + if err := obj.Unmarshal(item.data); err != nil { + return fmt.Errorf("unmarshal object from database: %w", err) + } + if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil { + return fmt.Errorf("flush object from database: %w", err) + } + } + last = batch[len(batch)-1].address + } + if err := db.Close(); err != nil { + return fmt.Errorf("close write-cache database: %w", err) + } + if err := os.Remove(filepath.Join(c.path, dbName)); err != nil { + return fmt.Errorf("remove write-cache database: %w", err) + } + return nil +} + +type batchItem struct { + data []byte + address string +} + +func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) { + const batchSize = 100 + var batch []batchItem + err := db.View(func(tx *bbolt.Tx) error { + var addr oid.Address + + b := tx.Bucket(defaultBucket) + cs := b.Cursor() + for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() { + sa := string(k) + if sa == last { + continue + } + if err := addr.DecodeString(sa); err != nil { + return fmt.Errorf("decode address from database: %w", err) + } + + batch = append(batch, batchItem{data: bytes.Clone(data), address: sa}) + if len(batch) == batchSize { + return errIterationCompleted + } + } + return nil + }) + if err == nil || errors.Is(err, errIterationCompleted) { + return batch, nil + } + return nil, err +} + +// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true. +func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) { + return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{ + NoFreelistSync: true, + ReadOnly: ro, + Timeout: 100 * time.Millisecond, + OpenFile: openFile, + }) +} diff --git a/pkg/local_object_storage/writecache/util.go b/pkg/local_object_storage/writecache/util.go deleted file mode 100644 index 0ed4a954e..000000000 --- a/pkg/local_object_storage/writecache/util.go +++ /dev/null @@ -1,20 +0,0 @@ -package writecache - -import ( - "io/fs" - "os" - "path/filepath" - "time" - - "go.etcd.io/bbolt" -) - -// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true. -func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) { - return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{ - NoFreelistSync: true, - ReadOnly: ro, - Timeout: 100 * time.Millisecond, - OpenFile: openFile, - }) -} diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index 71dba61cf..7ed511318 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -20,6 +20,12 @@ type Info struct { Path string } +type SealPrm struct { + IgnoreErrors bool + RestoreMode bool + Shrink bool +} + // Cache represents write-cache for objects. type Cache interface { Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) @@ -32,21 +38,21 @@ type Cache interface { // Returns ErrReadOnly if the Cache is currently in the read-only mode. 
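The one-shot migration above drains the legacy BoltDB in bounded batches: each pass opens a read-only View, Seeks the cursor to the last processed key, skips that key itself, and aborts the transaction with a sentinel error once the batch is full. A sketch of that pagination pattern against a plain bbolt bucket (package name, bucket handling and batch size are illustrative; the bucket is assumed to exist):

```go
package example

import (
	"bytes"
	"errors"

	"go.etcd.io/bbolt"
)

var errIterationCompleted = errors.New("iteration completed")

// readBatch returns up to batchSize key/value pairs stored after `last`.
// Call it in a loop, feeding the last returned key back in, until it
// yields an empty batch.
func readBatch(db *bbolt.DB, bucket, last []byte, batchSize int) (keys, vals [][]byte, err error) {
	err = db.View(func(tx *bbolt.Tx) error {
		cs := tx.Bucket(bucket).Cursor()
		for k, v := cs.Seek(last); k != nil; k, v = cs.Next() {
			if bytes.Equal(k, last) {
				continue // Seek lands on `last` itself; resume after it
			}
			// Clone: slices returned by bbolt are only valid inside the tx.
			keys = append(keys, bytes.Clone(k))
			vals = append(vals, bytes.Clone(v))
			if len(keys) == batchSize {
				return errIterationCompleted // sentinel: batch full, stop scanning
			}
		}
		return nil
	})
	if err != nil && !errors.Is(err, errIterationCompleted) {
		return nil, nil, err
	}
	return keys, vals, nil
}
```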
Delete(context.Context, oid.Address) error Put(context.Context, common.PutPrm) (common.PutRes, error) - SetMode(mode.Mode) error + SetMode(context.Context, mode.Mode) error SetLogger(*logger.Logger) DumpInfo() Info Flush(context.Context, bool, bool) error - Seal(context.Context, bool) error + Seal(context.Context, SealPrm) error - Init() error + Init(context.Context) error Open(ctx context.Context, mode mode.Mode) error - Close() error + Close(context.Context) error GetMetrics() Metrics } // MainStorage is the interface of the underlying storage of Cache implementations. type MainStorage interface { - Compressor() *compression.Config + Compressor() *compression.Compressor Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error) Put(context.Context, common.PutPrm) (common.PutRes, error) } diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go index b6718dea5..2849f3052 100644 --- a/pkg/morph/client/actor.go +++ b/pkg/morph/client/actor.go @@ -16,7 +16,7 @@ type actorProvider interface { GetRPCActor() actor.RPCActor } -// Client switches an established connection with neo-go if it is broken. +// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken. // This leads to an invalidation of an rpc actor within Client. That means the // components that are initilized with the rpc actor may unintentionally use // it when it is already invalidated. SwitchRPCGuardedActor is used to prevent diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index aae245acd..4462daab4 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -1,36 +1,33 @@ package balance import ( + "context" "fmt" "math/big" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" ) // BalanceOf receives the amount of funds in the client's account // through the Balance contract call, and returns it. -func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { - h, err := address.StringToUint160(id.EncodeToString()) - if err != nil { - return nil, err - } +func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) { + h := id.ScriptHash() invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(balanceOfMethod) invokePrm.SetArgs(h) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err) } else if ln := len(prms); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln) } amount, err := client.BigIntFromStackItem(prms[0]) if err != nil { - return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err) + return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err) } return amount, nil } diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go index 4befbef45..f4685b0ab 100644 --- a/pkg/morph/client/balance/burn.go +++ b/pkg/morph/client/balance/burn.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -30,12 +32,12 @@ func (b *BurnPrm) SetID(id []byte) { } // Burn destroys funds from the account. 
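Alongside threading context.Context through the Cache interface, the balance wrappers stop round-tripping the owner ID through its Neo address string: user.ID now exposes the script hash directly. Both forms side by side (a sketch; the helper names are ours, the old path is what the diff removes):

```go
package example

import (
	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/util"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// Old: encode the ID to a Neo address string, then parse it back into
// a Uint160, paying an allocation and a possible error on every call.
func scriptHashOld(id user.ID) (util.Uint160, error) {
	return address.StringToUint160(id.EncodeToString())
}

// New: read the script hash straight off the ID, no error path.
func scriptHashNew(id user.ID) util.Uint160 {
	return id.ScriptHash()
}
```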
-func (c *Client) Burn(p BurnPrm) error { +func (c *Client) Burn(ctx context.Context, p BurnPrm) error { prm := client.InvokePrm{} prm.SetMethod(burnMethod) prm.SetArgs(p.to, p.amount, p.id) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go index b05c526dc..1dacb9574 100644 --- a/pkg/morph/client/balance/client.go +++ b/pkg/morph/client/balance/client.go @@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) if err != nil { - return nil, fmt.Errorf("could not create static client of Balance contract: %w", err) + return nil, fmt.Errorf("create 'balance' contract client: %w", err) } return &Client{ @@ -54,15 +54,7 @@ type Option func(*opts) type opts []client.StaticClientOption func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - *o = append(*o, client.TryNotary()) - } + return &opts{client.TryNotary()} } // AsAlphabet returns option to sign main TX diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go index 39e4b28e5..57e61d62b 100644 --- a/pkg/morph/client/balance/decimals.go +++ b/pkg/morph/client/balance/decimals.go @@ -1,6 +1,7 @@ package balance import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,20 +9,20 @@ import ( // Decimals decimal precision of currency transactions // through the Balance contract call, and returns it. -func (c *Client) Decimals() (uint32, error) { +func (c *Client) Decimals(ctx context.Context) (uint32, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(decimalsMethod) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err) + return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err) } else if ln := len(prms); ln != 1 { return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln) } decimals, err := client.IntFromStackItem(prms[0]) if err != nil { - return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err) + return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err) } return uint32(decimals), nil } diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go index a5b206799..83e8b0586 100644 --- a/pkg/morph/client/balance/lock.go +++ b/pkg/morph/client/balance/lock.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -42,12 +44,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) { } // Lock locks fund on the user account. 
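The read-only wrappers all share one template: build a TestInvokePrm, test-invoke with the caller's context, validate the stack item count, then decode. A hypothetical method written as if inside one of these wrapper packages (the method name is a placeholder; Decimals above is the real instance of the shape):

```go
// exampleCount shows the shared read-only call template.
func (c *Client) exampleCount(ctx context.Context) (uint32, error) {
	prm := client.TestInvokePrm{}
	prm.SetMethod("exampleMethod")

	items, err := c.client.TestInvoke(ctx, prm)
	if err != nil {
		return 0, fmt.Errorf("test invoke (%s): %w", "exampleMethod", err)
	} else if ln := len(items); ln != 1 {
		return 0, fmt.Errorf("unexpected stack item count (%s): %d", "exampleMethod", ln)
	}

	v, err := client.IntFromStackItem(items[0])
	if err != nil {
		return 0, fmt.Errorf("get integer stack item (%s): %w", "exampleMethod", err)
	}
	return uint32(v), nil
}
```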
-func (c *Client) Lock(p LockPrm) error { +func (c *Client) Lock(ctx context.Context, p LockPrm) error { prm := client.InvokePrm{} prm.SetMethod(lockMethod) prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go index 73448da31..082ade85e 100644 --- a/pkg/morph/client/balance/mint.go +++ b/pkg/morph/client/balance/mint.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -30,12 +32,12 @@ func (m *MintPrm) SetID(id []byte) { } // Mint sends funds to the account. -func (c *Client) Mint(p MintPrm) error { +func (c *Client) Mint(ctx context.Context, p MintPrm) error { prm := client.InvokePrm{} prm.SetMethod(mintMethod) prm.SetArgs(p.to, p.amount, p.id) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 08fb05289..870bed166 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -1,11 +1,11 @@ package balance import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" ) // TransferPrm groups parameters of TransferX method. @@ -21,27 +21,18 @@ type TransferPrm struct { // TransferX transfers p.Amount of GASe-12 from p.From to p.To // with details p.Details through direct smart contract call. -// -// If TryNotary is provided, calls notary contract. 
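The mutating wrappers follow the mirror-image template: fill a Prm struct, copy its InvokePrmOptional, and pass the caller's context down to Invoke, so cancellation and logging see the same request context. A hypothetical wrapper in the same shape (type, method and argument names are placeholders):

```go
// ExamplePrm groups parameters the way BurnPrm, LockPrm and MintPrm do.
type ExamplePrm struct {
	to     util.Uint160
	amount int64

	client.InvokePrmOptional
}

// Example shows the post-diff wrapper template: context first, Prm second.
func (c *Client) Example(ctx context.Context, p ExamplePrm) error {
	prm := client.InvokePrm{}
	prm.SetMethod("exampleMethod")
	prm.SetArgs(p.to, p.amount)
	prm.InvokePrmOptional = p.InvokePrmOptional

	_, err := c.client.Invoke(ctx, prm) // ctx now reaches the RPC layer
	return err
}
```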
-func (c *Client) TransferX(p TransferPrm) error { - from, err := address.StringToUint160(p.From.EncodeToString()) - if err != nil { - return err - } - - to, err := address.StringToUint160(p.To.EncodeToString()) - if err != nil { - return err - } +func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { + from := p.From.ScriptHash() + to := p.To.ScriptHash() prm := client.InvokePrm{} prm.SetMethod(transferXMethod) prm.SetArgs(from, to, p.Amount, p.Details) prm.InvokePrmOptional = p.InvokePrmOptional - _, err = c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err) + return fmt.Errorf("invoke method (%s): %w", transferXMethod, err) } return nil } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index df521f56b..aab058d27 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -9,6 +9,7 @@ import ( "sync/atomic" "time" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" @@ -19,6 +20,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/neorpc/result" "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" @@ -59,6 +61,9 @@ type Client struct { rpcActor *actor.Actor // neo-go RPC actor gasToken *nep17.Token // neo-go GAS token wrapper rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper + nnsHash util.Uint160 // NNS contract hash + + nnsReader *nnsClient.ContractReader // NNS contract wrapper acc *wallet.Account // neo account accAddr util.Uint160 // account's address @@ -93,27 +98,12 @@ type Client struct { type cache struct { m sync.RWMutex - nnsHash *util.Uint160 gKey *keys.PublicKey txHeights *lru.Cache[util.Uint256, uint32] metrics metrics.MorphCacheMetrics } -func (c *cache) nns() *util.Uint160 { - c.m.RLock() - defer c.m.RUnlock() - - return c.nnsHash -} - -func (c *cache) setNNSHash(nnsHash util.Uint160) { - c.m.Lock() - defer c.m.Unlock() - - c.nnsHash = &nnsHash -} - func (c *cache) groupKey() *keys.PublicKey { c.m.RLock() defer c.m.RUnlock() @@ -132,7 +122,6 @@ func (c *cache) invalidate() { c.m.Lock() defer c.m.Unlock() - c.nnsHash = nil c.gKey = nil c.txHeights.Purge() } @@ -162,24 +151,10 @@ func (e *notHaltStateError) Error() string { ) } -// implementation of error interface for FrostFS-specific errors. -type frostfsError struct { - err error -} - -func (e frostfsError) Error() string { - return fmt.Sprintf("frostfs error: %v", e.err) -} - -// wraps FrostFS-specific error into frostfsError. Arg must not be nil. -func wrapFrostFSError(err error) error { - return frostfsError{err} -} - // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. 
-func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { +func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) { start := time.Now() success := false defer func() { @@ -190,29 +165,29 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, defer c.switchLock.RUnlock() if c.inactive { - return 0, ErrConnectionLost + return InvokeRes{}, ErrConnectionLost } txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...) if err != nil { - return 0, fmt.Errorf("could not invoke %s: %w", method, err) + return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err) } - c.logger.Debug(logs.ClientNeoClientInvoke, + c.logger.Debug(ctx, logs.ClientNeoClientInvoke, zap.String("method", method), zap.Uint32("vub", vub), zap.Stringer("tx_hash", txHash.Reverse())) success = true - return vub, nil + return InvokeRes{Hash: txHash, VUB: vub}, nil } // TestInvokeIterator invokes contract method returning an iterator and executes cb on each element. // If cb returns an error, the session is closed and this error is returned as-is. -// If the remove neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. +// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created. // The default batchSize is 100, the default limit from neo-go. -func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error { +func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error { start := time.Now() success := false defer func() { @@ -239,7 +214,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int if err != nil { return err } else if val.State != HaltState { - return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) + return ¬HaltStateError{state: val.State, exception: val.FaultException} } arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err) @@ -261,10 +236,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int }() // Batch size for TraverseIterator() can restricted on the server-side. 
- traverseBatchSize := batchSize - if invoker.DefaultIteratorResultItems < traverseBatchSize { - traverseBatchSize = invoker.DefaultIteratorResultItems - } + traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems) for { items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize) if err != nil { @@ -306,7 +278,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) ( } if val.State != HaltState { - return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) + return nil, ¬HaltStateError{state: val.State, exception: val.FaultException} } success = true @@ -327,7 +299,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error return err } - c.logger.Debug(logs.ClientNativeGasTransferInvoke, + c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke, zap.String("to", receiver.StringLE()), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -361,7 +333,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8 return err } - c.logger.Debug(logs.ClientBatchGasTransferInvoke, + c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke, zap.Strings("to", receiversLog), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -388,8 +360,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { height, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error(logs.ClientCantGetBlockchainHeight, - zap.String("error", err.Error())) + c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight, + zap.Error(err)) return nil } @@ -402,8 +374,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { newHeight, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error(logs.ClientCantGetBlockchainHeight243, - zap.String("error", err.Error())) + c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243, + zap.Error(err)) return nil } @@ -461,6 +433,28 @@ func (c *Client) TxHalt(h util.Uint256) (res bool, err error) { return len(aer.Executions) > 0 && aer.Executions[0].VMState.HasFlag(vmstate.Halt), nil } +func (c *Client) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) { + c.switchLock.RLock() + defer c.switchLock.RUnlock() + + if c.inactive { + return nil, ErrConnectionLost + } + + return c.client.GetApplicationLog(hash, trig) +} + +func (c *Client) GetVersion() (*result.Version, error) { + c.switchLock.RLock() + defer c.switchLock.RUnlock() + + if c.inactive { + return nil, ErrConnectionLost + } + + return c.client.GetVersion() +} + // TxHeight returns true if transaction has been successfully executed and persisted. func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) { c.switchLock.RLock() @@ -476,7 +470,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) { // NeoFSAlphabetList returns keys that stored in NeoFS Alphabet role. Main chain // stores alphabet node keys of inner ring there, however the sidechain stores both // alphabet and non alphabet node keys of inner ring. 
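Invoke now hands back an InvokeRes carrying both the transaction hash and the valid-until-block value, so a caller can poll the execution result without recomputing the hash. A hypothetical caller combining it with the Wait and TxHalt helpers shown here (contract hash, fee and method are placeholders; whether one block of waiting is enough is deployment-specific):

```go
func invokeAndVerify(ctx context.Context, c *client.Client, contract util.Uint160, fee fixedn.Fixed8) error {
	res, err := c.Invoke(ctx, contract, fee, "exampleMethod", int64(42))
	if err != nil {
		return err
	}
	// Give the transaction a block to land; Wait's argument is a number
	// of blocks, one here is purely illustrative.
	if err := c.Wait(ctx, 1); err != nil {
		return err
	}
	ok, err := c.TxHalt(res.Hash)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("tx %s did not halt (valid until block %d)", res.Hash.StringLE(), res.VUB)
	}
	return nil
}
```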
-func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) { +func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -486,7 +480,7 @@ func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) { list, err := c.roleList(noderoles.NeoFSAlphabet) if err != nil { - return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err) + return nil, fmt.Errorf("get alphabet nodes role list: %w", err) } return list, nil @@ -500,7 +494,7 @@ func (c *Client) GetDesignateHash() util.Uint160 { func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) { height, err := c.rpcActor.GetBlockCount() if err != nil { - return nil, fmt.Errorf("can't get chain height: %w", err) + return nil, fmt.Errorf("get chain height: %w", err) } return c.rolemgmt.GetDesignatedByRole(r, height) @@ -571,6 +565,7 @@ func (c *Client) setActor(act *actor.Actor) { c.rpcActor = act c.gasToken = nep17.New(act, gas.Hash) c.rolemgmt = rolemgmt.New(act) + c.nnsReader = nnsClient.NewReader(act, c.nnsHash) } func (c *Client) GetActor() *actor.Actor { diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index 648c7d3c0..e4dcd0db7 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "net" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -41,13 +42,13 @@ type cfg struct { endpoints []Endpoint - singleCli *rpcclient.WSClient // neo-go client for single client mode - inactiveModeCb Callback switchInterval time.Duration morphCacheMetrics metrics.MorphCacheMetrics + + dialerSource DialerSource } const ( @@ -60,13 +61,14 @@ var ErrNoHealthyEndpoint = errors.New("no healthy endpoint") func defaultConfig() *cfg { return &cfg{ dialTimeout: defaultDialTimeout, - logger: &logger.Logger{Logger: zap.L()}, + logger: logger.NewLoggerWrapper(zap.L()), metrics: morphmetrics.NoopRegister{}, waitInterval: defaultWaitInterval, signer: &transaction.Signer{ Scopes: transaction.Global, }, morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{}, + dialerSource: &noopDialerSource{}, } } @@ -124,37 +126,30 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er var err error var act *actor.Actor - if cfg.singleCli != nil { - // return client in single RPC node mode that uses - // predefined WS client - // - // in case of the closing web socket connection: - // if extra endpoints were provided via options, - // they will be used in switch process, otherwise - // inactive mode will be enabled - cli.client = cfg.singleCli - - act, err = newActor(cfg.singleCli, acc, *cfg) + var endpoint Endpoint + for cli.endpoints.curr, endpoint = range cli.endpoints.list { + cli.client, act, err = cli.newCli(ctx, endpoint) if err != nil { - return nil, fmt.Errorf("could not create RPC actor: %w", err) - } - } else { - var endpoint Endpoint - for cli.endpoints.curr, endpoint = range cli.endpoints.list { - cli.client, act, err = cli.newCli(ctx, endpoint) - if err != nil { - cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint, - zap.Error(err), zap.String("endpoint", endpoint.Address)) - } else { - cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint, - zap.String("endpoint", endpoint.Address)) - break + cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint, + zap.Error(err), zap.String("endpoint", endpoint.Address)) + } else { + cli.logger.Info(ctx, 
logs.FrostFSIRCreatedRPCClientForEndpoint, + zap.String("endpoint", endpoint.Address)) + if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 { + cli.switchIsActive.Store(true) + go cli.switchToMostPrioritized(ctx) } - } - if cli.client == nil { - return nil, ErrNoHealthyEndpoint + break } } + if cli.client == nil { + return nil, ErrNoHealthyEndpoint + } + cs, err := cli.client.GetContractStateByID(nnsContractID) + if err != nil { + return nil, fmt.Errorf("resolve nns hash: %w", err) + } + cli.nnsHash = cs.Hash cli.setActor(act) go cli.closeWaiter(ctx) @@ -171,6 +166,7 @@ func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSCl Options: rpcclient.Options{ DialTimeout: c.cfg.dialTimeout, TLSClientConfig: cfg, + NetDialContext: c.cfg.dialerSource.NetContextDialer(), }, }) if err != nil { @@ -281,17 +277,6 @@ func WithEndpoints(endpoints ...Endpoint) Option { } } -// WithSingleClient returns a client constructor option -// that specifies single neo-go client and forces Client -// to use it for requests. -// -// Passed client must already be initialized. -func WithSingleClient(cli *rpcclient.WSClient) Option { - return func(c *cfg) { - c.singleCli = cli - } -} - // WithConnLostCallback return a client constructor option // that specifies a callback that is called when Client // unsuccessfully tried to connect to all the specified @@ -316,3 +301,19 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option { c.morphCacheMetrics = morphCacheMetrics } } + +type DialerSource interface { + NetContextDialer() func(context.Context, string, string) (net.Conn, error) +} + +type noopDialerSource struct{} + +func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) { + return nil +} + +func WithDialerSource(ds DialerSource) Option { + return func(c *cfg) { + c.dialerSource = ds + } +} diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index 9dd3a337b..be684619b 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -27,17 +27,8 @@ const ( getMethod = "get" listMethod = "list" containersOfMethod = "containersOf" - eaclMethod = "eACL" - setEACLMethod = "setEACL" deletionInfoMethod = "deletionInfo" - startEstimationMethod = "startContainerEstimation" - stopEstimationMethod = "stopContainerEstimation" - - putSizeMethod = "putContainerSize" - listSizesMethod = "listContainerSizes" - getSizeMethod = "getContainerSize" - // putNamedMethod is method name for container put with an alias. It is exported to provide custom fee. putNamedMethod = "putNamed" ) @@ -55,9 +46,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts[i](o) } - sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...) + sc, err := client.NewStatic(cli, contract, fee, *o...) if err != nil { - return nil, fmt.Errorf("can't create container static client: %w", err) + return nil, fmt.Errorf("create 'container' contract client: %w", err) } return &Client{client: sc}, nil @@ -77,20 +68,10 @@ func (c Client) ContractAddress() util.Uint160 { // parameter of Wrapper. type Option func(*opts) -type opts struct { - staticOpts []client.StaticClientOption -} +type opts []client.StaticClientOption func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. 
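With WithSingleClient gone, connection customization moves to the new DialerSource option, which supplies the net dialer used for WebSocket RPC connections. A minimal implementation backed by net.Dialer (the timeout value is illustrative):

```go
package example

import (
	"context"
	"net"
	"time"
)

// timeoutDialerSource satisfies the new DialerSource interface by
// routing every RPC dial through a net.Dialer with a hard timeout.
type timeoutDialerSource struct {
	d net.Dialer
}

func (s *timeoutDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
	return s.d.DialContext
}

// newTimeoutSource builds a source with a fixed dial timeout; wire it in
// with client.New(ctx, key, client.WithDialerSource(newTimeoutSource())).
func newTimeoutSource() *timeoutDialerSource {
	return &timeoutDialerSource{d: net.Dialer{Timeout: 5 * time.Second}}
}
```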
-func TryNotary() Option { - return func(o *opts) { - o.staticOpts = append(o.staticOpts, client.TryNotary()) - } + return &opts{client.TryNotary()} } // AsAlphabet returns option to sign main TX @@ -100,6 +81,6 @@ func TryNotary() Option { // Considered to be used by IR nodes only. func AsAlphabet() Option { return func(o *opts) { - o.staticOpts = append(o.staticOpts, client.AsAlphabet()) + *o = append(*o, client.AsAlphabet()) } } diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go index c4db0fe6e..60fb8ad7c 100644 --- a/pkg/morph/client/container/containers_of.go +++ b/pkg/morph/client/container/containers_of.go @@ -1,10 +1,9 @@ package container import ( + "context" "errors" - "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" @@ -15,28 +14,37 @@ import ( // to the specified user of FrostFS system. If idUser is nil, returns the list of all containers. // // If remote RPC does not support neo-go session API, fallback to List() method. -func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { - var rawID []byte +func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) { + var cidList []cid.ID + var err error + cb := func(id cid.ID) error { + cidList = append(cidList, id) + return nil + } + if err = c.IterateContainersOf(ctx, idUser, cb); err != nil { + return nil, err + } + return cidList, nil +} + +// IterateContainersOf iterates over a list of container identifiers +// belonging to the specified user of FrostFS system and executes +// `cb` on each element. If idUser is nil, calls it on the list of all containers. +func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error { + var rawID []byte if idUser != nil { rawID = idUser.WalletBytes() } - var cidList []cid.ID - cb := func(item stackitem.Item) error { - rawID, err := client.BytesFromStackItem(item) + itemCb := func(item stackitem.Item) error { + id, err := getCIDfromStackItem(item) if err != nil { - return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err) + return err } - - var id cid.ID - - err = id.Decode(rawID) - if err != nil { - return fmt.Errorf("decode container ID: %w", err) + if err = cb(id); err != nil { + return err } - - cidList = append(cidList, id) return nil } @@ -50,13 +58,10 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { const batchSize = 512 cnrHash := c.client.ContractAddress() - err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID) - if err != nil { - if errors.Is(err, unwrap.ErrNoSessionID) { - return c.list(idUser) - } - return nil, err + err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID) + if err != nil && errors.Is(err, unwrap.ErrNoSessionID) { + return c.iterate(ctx, idUser, cb) } - return cidList, nil + return err } diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go index 20351b570..09912efa5 100644 --- a/pkg/morph/client/container/delete.go +++ b/pkg/morph/client/container/delete.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "fmt" @@ -12,7 +13,7 @@ import ( // along with signature and session token. // // Returns error if container ID is nil.
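ContainersOf is now a thin collector over IterateContainersOf, and a callback can stop the listing early by returning an error, which the iterator propagates as-is (per the TestInvokeIterator contract). A hypothetical caller that takes only the first n container IDs (the sentinel error is ours, not the package's):

```go
var errEnough = errors.New("enough containers")

// firstN collects at most n container IDs owned by `owner`, stopping
// the listing early via the callback's error.
func firstN(ctx context.Context, c *container.Client, owner *user.ID, n int) ([]cid.ID, error) {
	ids := make([]cid.ID, 0, n)
	err := c.IterateContainersOf(ctx, owner, func(id cid.ID) error {
		ids = append(ids, id)
		if len(ids) == n {
			return errEnough // returned as-is by the iterator
		}
		return nil
	})
	if err != nil && !errors.Is(err, errEnough) {
		return nil, err
	}
	return ids, nil
}
```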
-func Delete(c *Client, witness core.RemovalWitness) error { +func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error { binCnr := make([]byte, sha256.Size) witness.ContainerID.Encode(binCnr) @@ -26,7 +27,7 @@ func Delete(c *Client, witness core.RemovalWitness) error { prm.SetToken(tok.Marshal()) } - _, err := c.Delete(prm) + _, err := c.Delete(ctx, prm) return err } @@ -65,9 +66,7 @@ func (d *DeletePrm) SetKey(key []byte) { // // Returns valid until block and any error encountered that caused // the removal to interrupt. -// -// If TryNotary is provided, calls notary contract. -func (c *Client) Delete(p DeletePrm) (uint32, error) { +func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) { if len(p.signature) == 0 && !p.IsControl() { return 0, errNilArgument } @@ -77,9 +76,9 @@ func (c *Client) Delete(p DeletePrm) (uint32, error) { prm.SetArgs(p.cnr, p.signature, p.key, p.token) prm.InvokePrmOptional = p.InvokePrmOptional - res, err := c.client.Invoke(prm) + res, err := c.client.Invoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err) + return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err) } return res.VUB, nil } diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go index dda6bf98c..90bcdd7d5 100644 --- a/pkg/morph/client/container/deletion_info.go +++ b/pkg/morph/client/container/deletion_info.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "fmt" "strings" @@ -14,39 +15,39 @@ import ( "github.com/mr-tron/base58" ) -func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) { - return DeletionInfo((*Client)(x), cnr) +func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) { + return DeletionInfo(ctx, (*Client)(x), cnr) } type deletionInfo interface { - DeletionInfo(cid []byte) (*containercore.DelInfo, error) + DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) } -func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { +func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.DeletionInfo(binCnr) + return c.DeletionInfo(ctx, binCnr) } -func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { +func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) { prm := client.TestInvokePrm{} prm.SetMethod(deletionInfoMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) } - return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err) } else if ln := len(res); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln) } arr, err := client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err) } if len(arr) != 2 { @@ -55,17 +56,17 @@ func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { rawOwner, err := 
client.BytesFromStackItem(arr[0]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err) } var owner user.ID if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil { - return nil, fmt.Errorf("could not decode container owner id (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err) } epoch, err := client.BigIntFromStackItem(arr[1]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err) } return &containercore.DelInfo{ diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go deleted file mode 100644 index 8e9455050..000000000 --- a/pkg/morph/client/container/eacl.go +++ /dev/null @@ -1,95 +0,0 @@ -package container - -import ( - "crypto/sha256" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" -) - -// GetEACL reads the extended ACL table from FrostFS system -// through Container contract call. -// -// Returns apistatus.EACLNotFound if eACL table is missing in the contract. -func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) { - binCnr := make([]byte, sha256.Size) - cnr.Encode(binCnr) - - prm := client.TestInvokePrm{} - prm.SetMethod(eaclMethod) - prm.SetArgs(binCnr) - - prms, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln) - } - - arr, err := client.ArrayFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err) - } - - if len(arr) != 4 { - return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr)) - } - - rawEACL, err := client.BytesFromStackItem(arr[0]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err) - } - - sig, err := client.BytesFromStackItem(arr[1]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err) - } - - // Client may not return errors if the table is missing, so check this case additionally. - // The absence of a signature in the response can be taken as an eACL absence criterion, - // since unsigned table cannot be approved in the storage by design. 
- if len(sig) == 0 { - return nil, new(apistatus.EACLNotFound) - } - - pub, err := client.BytesFromStackItem(arr[2]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err) - } - - binToken, err := client.BytesFromStackItem(arr[3]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err) - } - - var res container.EACL - - res.Value = eacl.NewTable() - if err = res.Value.Unmarshal(rawEACL); err != nil { - return nil, err - } - - if len(binToken) > 0 { - res.Session = new(session.Container) - - err = res.Session.Unmarshal(binToken) - if err != nil { - return nil, fmt.Errorf("could not unmarshal session token: %w", err) - } - } - - // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion - var sigV2 refs.Signature - sigV2.SetKey(pub) - sigV2.SetSign(sig) - sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256) - - err = res.Signature.ReadFromV2(sigV2) - return &res, err -} diff --git a/pkg/morph/client/container/estimations.go b/pkg/morph/client/container/estimations.go deleted file mode 100644 index f288c63cf..000000000 --- a/pkg/morph/client/container/estimations.go +++ /dev/null @@ -1,54 +0,0 @@ -package container - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// StartEstimationPrm groups parameters of StartEstimation operation. -type StartEstimationPrm struct { - commonEstimationPrm -} - -// StopEstimationPrm groups parameters of StopEstimation operation. -type StopEstimationPrm struct { - commonEstimationPrm -} - -type commonEstimationPrm struct { - epoch uint64 - - client.InvokePrmOptional -} - -// SetEpoch sets epoch. -func (p *commonEstimationPrm) SetEpoch(epoch uint64) { - p.epoch = epoch -} - -// StartEstimation votes to produce start estimation notification. -func (c *Client) StartEstimation(p StartEstimationPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(startEstimationMethod) - prm.SetArgs(p.epoch) - prm.InvokePrmOptional = p.InvokePrmOptional - - if _, err := c.client.Invoke(prm); err != nil { - return fmt.Errorf("could not invoke method (%s): %w", startEstimationMethod, err) - } - return nil -} - -// StopEstimation votes to produce stop estimation notification. 
-func (c *Client) StopEstimation(p StopEstimationPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(stopEstimationMethod) - prm.SetArgs(p.epoch) - prm.InvokePrmOptional = p.InvokePrmOptional - - if _, err := c.client.Invoke(prm); err != nil { - return fmt.Errorf("could not invoke method (%s): %w", stopEstimationMethod, err) - } - return nil -} diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index 6715f870f..8622d2cdd 100644 --- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -1,14 +1,15 @@ package container import ( + "context" "crypto/sha256" "fmt" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" @@ -16,8 +17,8 @@ import ( type containerSource Client -func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) { - return Get((*Client)(x), cnr) +func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) { + return Get(ctx, (*Client)(x), cnr) } // AsContainerSource provides container Source interface @@ -27,15 +28,15 @@ func AsContainerSource(w *Client) containercore.Source { } type getContainer interface { - Get(cid []byte) (*containercore.Container, error) + Get(ctx context.Context, cid []byte) (*containercore.Container, error) } // Get marshals container ID, and passes it to Wrapper's Get method. -func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { +func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.Get(binCnr) + return c.Get(ctx, binCnr) } // Get reads the container from FrostFS system by binary identifier @@ -43,24 +44,24 @@ func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { // // If an empty slice is returned for the requested identifier, // storage.ErrNotFound error is returned. 
-func (c *Client) Get(cid []byte) (*containercore.Container, error) { +func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { prm := client.TestInvokePrm{} prm.SetMethod(getMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) } - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err) } else if ln := len(res); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln) } arr, err := client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err) + return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err) } if len(arr) != 4 { @@ -69,29 +70,29 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { cnrBytes, err := client.BytesFromStackItem(arr[0]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err) } sigBytes, err := client.BytesFromStackItem(arr[1]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err) } pub, err := client.BytesFromStackItem(arr[2]) if err != nil { - return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err) } tokBytes, err := client.BytesFromStackItem(arr[3]) if err != nil { - return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err) } var cnr containercore.Container if err := cnr.Value.Unmarshal(cnrBytes); err != nil { // use other major version if there any - return nil, fmt.Errorf("can't unmarshal container: %w", err) + return nil, fmt.Errorf("unmarshal container: %w", err) } if len(tokBytes) > 0 { @@ -99,7 +100,7 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { err = cnr.Session.Unmarshal(tokBytes) if err != nil { - return nil, fmt.Errorf("could not unmarshal session token: %w", err) + return nil, fmt.Errorf("unmarshal session token: %w", err) } } diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go index 6fed46c1a..fc63d1beb 100644 --- a/pkg/morph/client/container/list.go +++ b/pkg/morph/client/container/list.go @@ -1,20 +1,22 @@ package container import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" ) -// list returns a list of container identifiers belonging +// iterate iterates through a list of container identifiers belonging // to the specified user of FrostFS system. The list is composed // through Container contract call. // -// Returns the identifiers of all FrostFS containers if pointer +// Iterates through the identifiers of all FrostFS containers if pointer // to user identifier is nil. 
-func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { +func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error { var rawID []byte if idUser != nil { @@ -25,34 +27,43 @@ func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { prm.SetMethod(listMethod) prm.SetArgs(rawID) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err) + return fmt.Errorf("test invoke (%s): %w", listMethod, err) } else if ln := len(res); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) + return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) } res, err = client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err) + return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err) } - cidList := make([]cid.ID, 0, len(res)) for i := range res { - rawID, err := client.BytesFromStackItem(res[i]) + id, err := getCIDfromStackItem(res[i]) if err != nil { - return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err) + return err } - var id cid.ID - - err = id.Decode(rawID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) + if err = cb(id); err != nil { + return err } - - cidList = append(cidList, id) } - return cidList, nil + return nil +} + +func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) { + rawID, err := client.BytesFromStackItem(item) + if err != nil { + return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err) + } + + var id cid.ID + + err = id.Decode(rawID) + if err != nil { + return cid.ID{}, fmt.Errorf("decode container ID: %w", err) + } + return id, nil } diff --git a/pkg/morph/client/container/load.go b/pkg/morph/client/container/load.go deleted file mode 100644 index b5263d7a6..000000000 --- a/pkg/morph/client/container/load.go +++ /dev/null @@ -1,171 +0,0 @@ -package container - -import ( - "crypto/sha256" - "fmt" - - v2refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -// AnnounceLoadPrm groups parameters of AnnounceLoad operation. -type AnnounceLoadPrm struct { - a container.SizeEstimation - key []byte - - client.InvokePrmOptional -} - -// SetAnnouncement sets announcement. -func (a2 *AnnounceLoadPrm) SetAnnouncement(a container.SizeEstimation) { - a2.a = a -} - -// SetReporter sets public key of the reporter. -func (a2 *AnnounceLoadPrm) SetReporter(key []byte) { - a2.key = key -} - -// AnnounceLoad saves container size estimation calculated by storage node -// with key in FrostFS system through Container contract call. -// -// Returns any error encountered that caused the saving to interrupt. 
-func (c *Client) AnnounceLoad(p AnnounceLoadPrm) error { - binCnr := make([]byte, sha256.Size) - p.a.Container().Encode(binCnr) - - prm := client.InvokePrm{} - prm.SetMethod(putSizeMethod) - prm.SetArgs(p.a.Epoch(), binCnr, p.a.Value(), p.key) - prm.InvokePrmOptional = p.InvokePrmOptional - - _, err := c.client.Invoke(prm) - if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", putSizeMethod, err) - } - return nil -} - -// EstimationID is an identity of container load estimation inside Container contract. -type EstimationID []byte - -// ListLoadEstimationsByEpoch returns a list of container load estimations for to the specified epoch. -// The list is composed through Container contract call. -func (c *Client) ListLoadEstimationsByEpoch(epoch uint64) ([]EstimationID, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(listSizesMethod) - invokePrm.SetArgs(epoch) - - prms, err := c.client.TestInvoke(invokePrm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listSizesMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", listSizesMethod, ln) - } - - prms, err = client.ArrayFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listSizesMethod, err) - } - - res := make([]EstimationID, 0, len(prms)) - for i := range prms { - id, err := client.BytesFromStackItem(prms[i]) - if err != nil { - return nil, fmt.Errorf("could not get ID byte array from stack item (%s): %w", listSizesMethod, err) - } - - res = append(res, id) - } - - return res, nil -} - -// Estimation is a structure of single container load estimation -// reported by storage node. -type Estimation struct { - Size uint64 - - Reporter []byte -} - -// Estimations is a structure of grouped container load estimation inside Container contract. -type Estimations struct { - ContainerID cid.ID - - Values []Estimation -} - -// GetUsedSpaceEstimations returns a list of container load estimations by ID. -// The list is composed through Container contract call. 
-func (c *Client) GetUsedSpaceEstimations(id EstimationID) (*Estimations, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(getSizeMethod) - prm.SetArgs([]byte(id)) - - prms, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getSizeMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", getSizeMethod, ln) - } - - prms, err = client.ArrayFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get stack items of estimation fields from stack item (%s): %w", getSizeMethod, err) - } else if ln := len(prms); ln != 2 { - return nil, fmt.Errorf("unexpected stack item count of estimations fields (%s)", getSizeMethod) - } - - rawCnr, err := client.BytesFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get container ID byte array from stack item (%s): %w", getSizeMethod, err) - } - - prms, err = client.ArrayFromStackItem(prms[1]) - if err != nil { - return nil, fmt.Errorf("could not get estimation list array from stack item (%s): %w", getSizeMethod, err) - } - - var cnr cid.ID - - err = cnr.Decode(rawCnr) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - v2 := new(v2refs.ContainerID) - v2.SetValue(rawCnr) - res := &Estimations{ - ContainerID: cnr, - Values: make([]Estimation, 0, len(prms)), - } - - for i := range prms { - arr, err := client.ArrayFromStackItem(prms[i]) - if err != nil { - return nil, fmt.Errorf("could not get estimation struct from stack item (%s): %w", getSizeMethod, err) - } else if ln := len(arr); ln != 2 { - return nil, fmt.Errorf("unexpected stack item count of estimation fields (%s)", getSizeMethod) - } - - reporter, err := client.BytesFromStackItem(arr[0]) - if err != nil { - return nil, fmt.Errorf("could not get reporter byte array from stack item (%s): %w", getSizeMethod, err) - } - - sz, err := client.IntFromStackItem(arr[1]) - if err != nil { - return nil, fmt.Errorf("could not get estimation size from stack item (%s): %w", getSizeMethod, err) - } - - res.Values = append(res.Values, Estimation{ - Reporter: reporter, - Size: uint64(sz), - }) - } - - return res, nil -} diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go index ee323af00..3bb84eb87 100644 --- a/pkg/morph/client/container/put.go +++ b/pkg/morph/client/container/put.go @@ -1,11 +1,12 @@ package container import ( + "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" ) @@ -14,7 +15,7 @@ import ( // along with sig.Key() and sig.Sign(). // // Returns error if container is nil. 
-func Put(c *Client, cnr containercore.Container) (*cid.ID, error) { +func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) { data := cnr.Value.Marshal() d := container.ReadDomain(cnr.Value) @@ -35,7 +36,7 @@ func Put(c *Client, cnr containercore.Container) (*cid.ID, error) { prm.SetKey(sigV2.GetKey()) prm.SetSignature(sigV2.GetSign()) - err := c.Put(prm) + err := c.Put(ctx, prm) if err != nil { return nil, err } @@ -93,9 +94,7 @@ func (p *PutPrm) SetZone(zone string) { // // Returns calculated container identifier and any error // encountered that caused the saving to interrupt. -// -// If TryNotary is provided, calls notary contract. -func (c *Client) Put(p PutPrm) error { +func (c *Client) Put(ctx context.Context, p PutPrm) error { if len(p.sig) == 0 || len(p.key) == 0 { return errNilArgument } @@ -116,9 +115,9 @@ func (c *Client) Put(p PutPrm) error { prm.SetMethod(method) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", method, err) + return fmt.Errorf("invoke method (%s): %w", method, err) } return nil } diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go index 016b56f8f..d3eba7639 100644 --- a/pkg/morph/client/frostfs/cheque.go +++ b/pkg/morph/client/frostfs/cheque.go @@ -1,6 +1,8 @@ package frostfscontract import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/util" @@ -37,13 +39,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) { } // Cheque invokes `cheque` method of FrostFS contract. -func (x *Client) Cheque(p ChequePrm) error { +func (x *Client) Cheque(ctx context.Context, p ChequePrm) error { prm := client.InvokePrm{} prm.SetMethod(chequeMethod) prm.SetArgs(p.id, p.user, p.amount, p.lock) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := x.client.Invoke(prm) + _, err := x.client.Invoke(ctx, prm) return err } @@ -66,12 +68,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) { } // AlphabetUpdate update list of alphabet nodes. -func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error { +func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error { prm := client.InvokePrm{} prm.SetMethod(alphabetUpdateMethod) prm.SetArgs(p.id, p.pubs) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := x.client.Invoke(prm) + _, err := x.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go index 571915c27..cd6a9849e 100644 --- a/pkg/morph/client/frostfs/client.go +++ b/pkg/morph/client/frostfs/client.go @@ -35,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) 
if err != nil { - return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err) + return nil, fmt.Errorf("create 'frostfs' contract client: %w", err) } return &Client{client: sc}, nil diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go index 4c31f42de..61eb03f09 100644 --- a/pkg/morph/client/frostfsid/client.go +++ b/pkg/morph/client/frostfsid/client.go @@ -27,7 +27,7 @@ var _ frostfsidcore.SubjectProvider = (*Client)(nil) func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) { sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet()) if err != nil { - return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err) + return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err) } return &Client{client: sc}, nil diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go index 0852f536c..3a789672a 100644 --- a/pkg/morph/client/frostfsid/subject.go +++ b/pkg/morph/client/frostfsid/subject.go @@ -1,6 +1,7 @@ package frostfsid import ( + "context" "fmt" frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" @@ -14,14 +15,14 @@ const ( methodGetSubjectExtended = "getSubjectExtended" ) -func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { +func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubject) prm.SetArgs(addr) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err) } structArr, err := checkStackItem(res) @@ -31,20 +32,20 @@ func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) subj, err := frostfsidclient.ParseSubject(structArr) if err != nil { - return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err) } return subj, nil } -func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { +func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubjectExtended) prm.SetArgs(addr) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubjectExtended, err) + return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err) } structArr, err := checkStackItem(res) @@ -54,7 +55,7 @@ func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.Subject subj, err := frostfsidclient.ParseSubjectExtended(structArr) if err != nil { - return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubjectExtended, err) } return subj, nil @@ -67,7 +68,7 @@ func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error structArr, err = client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", methodGetSubject, err) + return nil,
fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err) } return } diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go index 10ed21582..b9e39c25e 100644 --- a/pkg/morph/client/multi.go +++ b/pkg/morph/client/multi.go @@ -2,6 +2,7 @@ package client import ( "context" + "slices" "sort" "time" @@ -42,7 +43,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool { newEndpoint := c.endpoints.list[c.endpoints.curr] cli, act, err := c.newCli(ctx, newEndpoint) if err != nil { - c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode, + c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode, zap.String("endpoint", newEndpoint.Address), zap.Error(err), ) @@ -52,7 +53,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool { c.cache.invalidate() - c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished, + c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished, zap.String("endpoint", newEndpoint.Address)) c.client = cli @@ -99,8 +100,7 @@ mainLoop: case <-t.C: c.switchLock.RLock() - endpointsCopy := make([]Endpoint, len(c.endpoints.list)) - copy(endpointsCopy, c.endpoints.list) + endpointsCopy := slices.Clone(c.endpoints.list) currPriority := c.endpoints.list[c.endpoints.curr].Priority highestPriority := c.endpoints.list[0].Priority @@ -122,7 +122,7 @@ mainLoop: cli, act, err := c.newCli(ctx, e) if err != nil { - c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode, + c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode, zap.String("endpoint", tryE), zap.Error(err), ) @@ -147,7 +147,7 @@ mainLoop: c.switchLock.Unlock() - c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC, + c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC, zap.String("endpoint", tryE)) return diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go index eafa097e9..de8afbfb5 100644 --- a/pkg/morph/client/netmap/client.go +++ b/pkg/morph/client/netmap/client.go @@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) if err != nil { - return nil, fmt.Errorf("can't create netmap static client: %w", err) + return nil, fmt.Errorf("create 'netmap' contract client: %w", err) } return &Client{client: sc}, nil @@ -65,15 +65,7 @@ type Option func(*opts) type opts []client.StaticClientOption func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - *o = append(*o, client.TryNotary()) - } + return &opts{client.TryNotary()} } // AsAlphabet returns option to sign main TX diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 2d19a8193..3f6aed506 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -1,7 +1,7 @@ package netmap import ( - "errors" + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -24,75 +24,45 @@ const ( // MaxObjectSize receives max object size configuration // value through the Netmap contract call. 
-func (c *Client) MaxObjectSize() (uint64, error) { - objectSize, err := c.readUInt64Config(MaxObjectSizeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err) - } - - return objectSize, nil +func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, MaxObjectSizeConfig) } // EpochDuration returns number of sidechain blocks per one FrostFS epoch. -func (c *Client) EpochDuration() (uint64, error) { - epochDuration, err := c.readUInt64Config(EpochDurationConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err) - } - - return epochDuration, nil +func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, EpochDurationConfig) } // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. -func (c *Client) ContainerFee() (uint64, error) { - fee, err := c.readUInt64Config(ContainerFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err) - } - - return fee, nil +func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, ContainerFeeConfig) } // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. -func (c *Client) ContainerAliasFee() (uint64, error) { - fee, err := c.readUInt64Config(ContainerAliasFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err) - } - - return fee, nil +func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, ContainerAliasFeeConfig) } // HomomorphicHashDisabled returns global configuration value of homomorphic hashing // settings. // // Returns (false, nil) if config key is not found in the contract. -func (c *Client) HomomorphicHashDisabled() (bool, error) { - return c.readBoolConfig(HomomorphicHashingDisabledKey) +func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { + return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey) } // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. -func (c *Client) InnerRingCandidateFee() (uint64, error) { - fee, err := c.readUInt64Config(IrCandidateFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err) - } - - return fee, nil +func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, IrCandidateFeeConfig) } // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. -func (c *Client) WithdrawFee() (uint64, error) { - fee, err := c.readUInt64Config(WithdrawFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err) - } - - return fee, nil +func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, WithdrawFeeConfig) } // MaintenanceModeAllowed reads admission of "maintenance" state from the @@ -100,34 +70,32 @@ func (c *Client) WithdrawFee() (uint64, error) { // that storage nodes are allowed to switch their state to "maintenance". // // By default, maintenance state is disallowed. 
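// Why dropping the explicit not-found branch keeps the old boolean semantics:
// the contract returns stackitem.Null for a missing key, and, assuming neo-go's
// usual conversion rules, Null.TryBool() yields (false, nil), so
// MaintenanceModeAllowed still reports false when the key is absent:
//
//	var item stackitem.Item = stackitem.Null{}
//	allowed, err := item.TryBool()
//	fmt.Println(allowed, err) // false <nil>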
-func (c *Client) MaintenanceModeAllowed() (bool, error) { - return c.readBoolConfig(MaintenanceModeAllowedConfig) +func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { + return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig) } -func (c *Client) readUInt64Config(key string) (uint64, error) { - v, err := c.config([]byte(key), IntegerAssert) +func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { + v, err := c.config(ctx, []byte(key)) + if err != nil { + return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) + } + + bi, err := v.TryInteger() if err != nil { return 0, err } - - // IntegerAssert is guaranteed to return int64 if the error is nil. - return uint64(v.(int64)), nil + return bi.Uint64(), nil } // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. -func (c *Client) readBoolConfig(key string) (bool, error) { - v, err := c.config([]byte(key), BoolAssert) +func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { + v, err := c.config(ctx, []byte(key)) if err != nil { - if errors.Is(err, ErrConfigNotFound) { - return false, nil - } - - return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err) + return false, fmt.Errorf("read netconfig value '%s': %w", key, err) } - // BoolAssert is guaranteed to return bool if the error is nil. - return v.(bool), nil + return v.TryBool() } // SetConfigPrm groups parameters of SetConfig operation. @@ -155,13 +123,13 @@ func (s *SetConfigPrm) SetValue(value any) { } // SetConfig sets config field. -func (c *Client) SetConfig(p SetConfigPrm) error { +func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error { prm := client.InvokePrm{} prm.SetMethod(setConfigMethod) prm.SetArgs(p.id, p.key, p.value) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } @@ -198,14 +166,14 @@ type NetworkConfiguration struct { } // ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain. -func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) { +func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) { var res NetworkConfiguration prm := client.TestInvokePrm{} prm.SetMethod(configListMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return res, fmt.Errorf("could not perform test invocation (%s): %w", + return res, fmt.Errorf("test invoke (%s): %w", configListMethod, err) } @@ -276,22 +244,18 @@ func bytesToBool(val []byte) bool { return false } -// ErrConfigNotFound is returned when the requested key was not found -// in the network config (returned value is `Null`). -var ErrConfigNotFound = errors.New("config value not found") - // config performs the test invoke of get config value // method of FrostFS Netmap contract. // -// Returns ErrConfigNotFound if config key is not found in the contract. +// Returns the raw stack item; a missing config key comes back as stackitem.Null.
-func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) { +func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) { prm := client.TestInvokePrm{} prm.SetMethod(configMethod) prm.SetArgs(key) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", configMethod, err) } @@ -300,26 +264,7 @@ func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (a configMethod, ln) } - if _, ok := items[0].(stackitem.Null); ok { - return nil, ErrConfigNotFound - } - - return assert(items[0]) -} - -// IntegerAssert converts stack item to int64. -func IntegerAssert(item stackitem.Item) (any, error) { - return client.IntFromStackItem(item) -} - -// StringAssert converts stack item to string. -func StringAssert(item stackitem.Item) (any, error) { - return client.StringFromStackItem(item) -} - -// BoolAssert converts stack item to bool. -func BoolAssert(item stackitem.Item) (any, error) { - return client.BoolFromStackItem(item) + return items[0], nil } // iterateRecords iterates over all config records and passes them to f. diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go index 92d569ae2..8561329ec 100644 --- a/pkg/morph/client/netmap/epoch.go +++ b/pkg/morph/client/netmap/epoch.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,13 +9,13 @@ import ( // Epoch receives number of current FrostFS epoch // through the Netmap contract call. -func (c *Client) Epoch() (uint64, error) { +func (c *Client) Epoch(ctx context.Context) (uint64, error) { prm := client.TestInvokePrm{} prm.SetMethod(epochMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", + return 0, fmt.Errorf("test invoke (%s): %w", epochMethod, err) } @@ -25,20 +26,20 @@ func (c *Client) Epoch() (uint64, error) { num, err := client.IntFromStackItem(items[0]) if err != nil { - return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err) + return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err) } return uint64(num), nil } // LastEpochBlock receives block number of current FrostFS epoch // through the Netmap contract call. 
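// The epoch getters follow the same caller pattern; a short sketch (client
// name assumed):
//
//	epoch, err := nmClient.Epoch(ctx) // current epoch number
//	if err != nil {
//		return err
//	}
//	height, err := nmClient.LastEpochBlock(ctx) // sidechain block at which that epoch was set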
-func (c *Client) LastEpochBlock() (uint32, error) { +func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) { prm := client.TestInvokePrm{} prm.SetMethod(lastEpochBlockMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", + return 0, fmt.Errorf("test invoke (%s): %w", lastEpochBlockMethod, err) } @@ -49,7 +50,7 @@ func (c *Client) LastEpochBlock() (uint32, error) { block, err := client.IntFromStackItem(items[0]) if err != nil { - return 0, fmt.Errorf("could not get number from stack item (%s): %w", + return 0, fmt.Errorf("get number from stack item (%s): %w", lastEpochBlockMethod, err) } return uint32(block), nil diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go index d6f8c56b2..0e1f9186b 100644 --- a/pkg/morph/client/netmap/innerring.go +++ b/pkg/morph/client/netmap/innerring.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "crypto/elliptic" "fmt" @@ -23,7 +24,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) { } // UpdateInnerRing updates inner ring keys. -func (c *Client) UpdateInnerRing(p UpdateIRPrm) error { +func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error { args := make([][]byte, len(p.keys)) for i := range args { args[i] = p.keys[i].Bytes() @@ -34,18 +35,18 @@ func (c *Client) UpdateInnerRing(p UpdateIRPrm) error { prm.SetArgs(args) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } // GetInnerRingList return current IR list. -func (c *Client) GetInnerRingList() (keys.PublicKeys, error) { +func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(innerRingListMethod) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err) } return irKeysFromStackItem(prms, innerRingListMethod) @@ -58,7 +59,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys irs, err := client.ArrayFromStackItem(stack[0]) if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err) + return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err) } irKeys := make(keys.PublicKeys, len(irs)) @@ -78,7 +79,7 @@ const irNodeFixedPrmNumber = 1 func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) { prms, err := client.ArrayFromStackItem(prm) if err != nil { - return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err) + return nil, fmt.Errorf("get stack item array (IRNode): %w", err) } else if ln := len(prms); ln != irNodeFixedPrmNumber { return nil, fmt.Errorf( "unexpected stack item count (IRNode): expected %d, has %d", @@ -89,7 +90,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) { byteKey, err := client.BytesFromStackItem(prms[0]) if err != nil { - return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err) + return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err) } return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256()) diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go index 
61bbf5f17..97782fc25 100644 --- a/pkg/morph/client/netmap/netmap.go +++ b/pkg/morph/client/netmap/netmap.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" @@ -11,14 +12,14 @@ import ( // GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and // decodes netmap.NetMap from the response. -func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(epochSnapshotMethod) invokePrm.SetArgs(epoch) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", epochSnapshotMethod, err) } @@ -34,13 +35,13 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { // GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo // from the response. -func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { +func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapCandidatesMethod) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err) } if len(res) > 0 { @@ -51,13 +52,13 @@ func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { } // NetMap calls "netmap" method and decode netmap.NetMap from the response. 
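// Caller sketch for the snapshot readers (the epoch value and the client name
// are illustrative):
//
//	nm, err := nmClient.GetNetMapByEpoch(ctx, epoch) // map pinned to a given epoch
//	if err != nil {
//		return err
//	}
//	candidates, err := nmClient.GetCandidates(ctx) // nodes applying for the next epoch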
-func (c *Client) NetMap() (*netmap.NetMap, error) { +func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapMethod) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", netMapMethod, err) } @@ -136,11 +137,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error { default: return fmt.Errorf("unsupported state %v", node.State) case netmapcontract.NodeStateOnline: - dst.SetOnline() + dst.SetStatus(netmap.Online) case netmapcontract.NodeStateOffline: - dst.SetOffline() + dst.SetStatus(netmap.Offline) case netmapcontract.NodeStateMaintenance: - dst.SetMaintenance() + dst.SetStatus(netmap.Maintenance) } return nil diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go index a8a306197..e686e271e 100644 --- a/pkg/morph/client/netmap/netmap_test.go +++ b/pkg/morph/client/netmap/netmap_test.go @@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) { switch i % 3 { default: - expected[i].SetOffline() + expected[i].SetStatus(netmap.Offline) case int(netmapcontract.NodeStateOnline): - expected[i].SetOnline() + expected[i].SetStatus(netmap.Online) case int(netmapcontract.NodeStateMaintenance): - expected[i].SetMaintenance() + expected[i].SetStatus(netmap.Maintenance) } expected[i].SetPublicKey(pub) @@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) { var state int64 - switch { - case expected[i].IsOnline(): + switch expected[i].Status() { + case netmap.Online: state = int64(netmapcontract.NodeStateOnline) - case expected[i].IsOffline(): + case netmap.Offline: state = int64(netmapcontract.NodeStateOffline) - case expected[i].IsMaintenance(): + case netmap.Maintenance: state = int64(netmapcontract.NodeStateMaintenance) } diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go index ded386c86..341b20935 100644 --- a/pkg/morph/client/netmap/new_epoch.go +++ b/pkg/morph/client/netmap/new_epoch.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,14 +9,14 @@ import ( // NewEpoch updates FrostFS epoch number through // Netmap contract call. -func (c *Client) NewEpoch(epoch uint64) error { +func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error { prm := client.InvokePrm{} prm.SetMethod(newEpochMethod) prm.SetArgs(epoch) - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) + return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) } return nil } @@ -24,16 +25,16 @@ func (c *Client) NewEpoch(epoch uint64) error { // control notary transaction internally to ensure all // nodes produce the same transaction with high probability. // If vub > 0, vub will be used as valid until block value. 
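// NewEpochControl sketch: SetControlTX(true) makes alphabet nodes assemble the
// same notary transaction deterministically, and the returned VUB can be fed
// back in on retry so all nodes keep one validity window (an illustrative
// pattern, not prescribed by the patch):
//
//	vub, err := nmClient.NewEpochControl(ctx, epoch, lastVUB)
//	if err != nil {
//		return err
//	}
//	lastVUB = vub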
-func (c *Client) NewEpochControl(epoch uint64, vub uint32) (uint32, error) { +func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) { prm := client.InvokePrm{} prm.SetMethod(newEpochMethod) prm.SetArgs(epoch) prm.SetControlTX(true) prm.SetVUB(vub) - res, err := c.client.Invoke(prm) + res, err := c.client.Invoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) + return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) } return res.VUB, nil } diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go index 764bbc899..e83acde39 100644 --- a/pkg/morph/client/netmap/peer.go +++ b/pkg/morph/client/netmap/peer.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "errors" "fmt" @@ -24,7 +25,7 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) { // AddPeer registers peer in FrostFS network through // Netmap contract call. -func (c *Client) AddPeer(p AddPeerPrm) error { +func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error { method := addPeerMethod if c.client.WithNotary() && c.client.IsAlpha() { @@ -39,15 +40,15 @@ func (c *Client) AddPeer(p AddPeerPrm) error { prm.SetArgs(p.nodeInfo.Marshal()) prm.InvokePrmOptional = p.InvokePrmOptional - if _, err := c.client.Invoke(prm); err != nil { - return fmt.Errorf("could not invoke method (%s): %w", method, err) + if _, err := c.client.Invoke(ctx, prm); err != nil { + return fmt.Errorf("invoke method (%s): %w", method, err) } return nil } // ForceRemovePeer marks the given peer as offline via a notary control transaction. // If vub > 0, vub will be used as valid until block value. -func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) { +func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) { if !c.client.WithNotary() { return 0, errFailedToRemovePeerWithoutNotary } @@ -57,9 +58,9 @@ func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, prm.SetControlTX(true) prm.SetVUB(vub) - vub, err := c.UpdatePeerState(prm) + res, err := c.UpdatePeerState(ctx, prm) if err != nil { return 0, fmt.Errorf("updating peer state: %v", err) } - return vub, nil + return res.VUB, nil } diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go index ba2c26af7..9dbec1a90 100644 --- a/pkg/morph/client/netmap/snapshot.go +++ b/pkg/morph/client/netmap/snapshot.go @@ -1,19 +1,22 @@ package netmap import ( + "context" + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) // GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response. 
-func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { prm := client.TestInvokePrm{} prm.SetMethod(snapshotMethod) prm.SetArgs(diff) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, err + return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err) } return DecodeNetMap(res) diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go index 7c3a4e8cd..f9f639c19 100644 --- a/pkg/morph/client/netmap/update_state.go +++ b/pkg/morph/client/netmap/update_state.go @@ -1,7 +1,7 @@ package netmap import ( - "fmt" + "context" "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -36,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() { } // UpdatePeerState changes peer status through Netmap contract call. -func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) { +func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) { method := updateStateMethod if c.client.WithNotary() && c.client.IsAlpha() { @@ -55,9 +55,5 @@ func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) { prm.SetArgs(int64(p.state), p.key) prm.InvokePrmOptional = p.InvokePrmOptional - res, err := c.client.Invoke(prm) - if err != nil { - return 0, fmt.Errorf("could not invoke smart contract: %w", err) - } - return res.VUB, nil + return c.client.Invoke(ctx, prm) } diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index 218f7ad8e..bc00eb889 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -8,14 +8,12 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" ) const ( @@ -37,12 +35,8 @@ const ( NNSPolicyContractName = "policy.frostfs" ) -var ( - // ErrNNSRecordNotFound means that there is no such record in NNS contract. - ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") - - errEmptyResultStack = errors.New("returned result stack is empty") -) +// ErrNNSRecordNotFound means that there is no such record in NNS contract. +var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") // NNSAlphabetContractName returns contract name of the alphabet contract in NNS // based on alphabet index. @@ -61,97 +55,36 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { return util.Uint160{}, ErrConnectionLost } - nnsHash, err := c.NNSHash() - if err != nil { - return util.Uint160{}, err - } - - sh, err = nnsResolve(c.client, nnsHash, name) + sh, err = nnsResolve(c.nnsReader, name) if err != nil { return sh, fmt.Errorf("NNS.resolve: %w", err) } return sh, nil } -// NNSHash returns NNS contract hash. 
-func (c *Client) NNSHash() (util.Uint160, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return util.Uint160{}, ErrConnectionLost - } - - success := false - startedAt := time.Now() - - defer func() { - c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt)) - }() - - nnsHash := c.cache.nns() - - if nnsHash == nil { - cs, err := c.client.GetContractStateByID(nnsContractID) - if err != nil { - return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err) - } - - c.cache.setNNSHash(cs.Hash) - nnsHash = &cs.Hash - } - success = true - return *nnsHash, nil -} - -func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { - found, err := exists(c, nnsHash, domain) +func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) { + available, err := r.IsAvailable(domain) if err != nil { - return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err) + return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) } - if !found { + if available { return nil, ErrNNSRecordNotFound } - result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - { - Type: smartcontract.IntegerType, - Value: big.NewInt(int64(nns.TXT)), - }, - }, nil) - if err != nil { - return nil, err - } - if result.State != vmstate.Halt.String() { - return nil, fmt.Errorf("invocation failed: %s", result.FaultException) - } - if len(result.Stack) == 0 { - return nil, errEmptyResultStack - } - return result.Stack[0], nil + return r.Resolve(domain, big.NewInt(int64(nns.TXT))) } -func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) { - res, err := nnsResolveItem(c, nnsHash, domain) +func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) { + arr, err := nnsResolveItem(r, domain) if err != nil { return util.Uint160{}, err } - // Parse the result of resolving NNS record. - // It works with multiple formats (corresponding to multiple NNS versions). - // If array of hashes is provided, it returns only the first one. - if arr, ok := res.Value().([]stackitem.Item); ok { - if len(arr) == 0 { - return util.Uint160{}, errors.New("NNS record is missing") - } - res = arr[0] + if len(arr) == 0 { + return util.Uint160{}, errors.New("NNS record is missing") } - bs, err := res.TryBytes() + bs, err := arr[0].TryBytes() if err != nil { return util.Uint160{}, fmt.Errorf("malformed response: %w", err) } @@ -171,33 +104,6 @@ func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (uti return util.Uint160{}, errors.New("no valid hashes are found") } -func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) { - result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - }, nil) - if err != nil { - return false, err - } - - if len(result.Stack) == 0 { - return false, errEmptyResultStack - } - - res := result.Stack[0] - - available, err := res.TryBool() - if err != nil { - return false, fmt.Errorf("malformed response: %w", err) - } - - // not available means that it is taken - // and, therefore, exists - return !available, nil -} - // SetGroupSignerScope makes the default signer scope include all FrostFS contracts. // Should be called for side-chain client only. 
func (c *Client) SetGroupSignerScope() error { @@ -241,18 +147,12 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) { return gKey, nil } - nnsHash, err := c.NNSHash() + arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName) if err != nil { return nil, err } - item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName) - if err != nil { - return nil, err - } - - arr, ok := item.Value().([]stackitem.Item) - if !ok || len(arr) == 0 { + if len(arr) == 0 { return nil, errors.New("NNS record is missing") } diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 4865b43ef..448702613 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -1,6 +1,7 @@ package client import ( + "context" "crypto/elliptic" "encoding/binary" "errors" @@ -37,8 +38,7 @@ type ( alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness - notary util.Uint160 - proxy util.Uint160 + proxy util.Uint160 } notaryCfg struct { @@ -57,16 +57,11 @@ const ( defaultNotaryValidTime = 50 defaultNotaryRoundTime = 100 - notaryBalanceOfMethod = "balanceOf" - notaryExpirationOfMethod = "expirationOf" - setDesignateMethod = "designateAsRole" + setDesignateMethod = "designateAsRole" - notaryBalanceErrMsg = "can't fetch notary balance" notaryNotEnabledPanicMsg = "notary support was not enabled on this client" ) -var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack") - func defaultNotaryConfig(c *Client) *notaryCfg { return ¬aryCfg{ txValidTime: defaultNotaryValidTime, @@ -106,7 +101,6 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { txValidTime: cfg.txValidTime, roundTime: cfg.roundTime, alphabetSource: cfg.alphabetSource, - notary: notary.Hash, } c.notary = notaryCfg @@ -140,7 +134,7 @@ func (c *Client) ProbeNotary() (res bool) { // use this function. // // This function must be invoked with notary enabled otherwise it throws panic. -func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uint256, err error) { +func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -154,16 +148,18 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin bc, err := c.rpcActor.GetBlockCount() if err != nil { - return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err) + return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err) } - currentTill, err := c.depositExpirationOf() + r := notary.NewReader(c.rpcActor) + currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash()) if err != nil { - return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err) + return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err) } - till := max(int64(bc+delta), currentTill) - return c.depositNotary(amount, till) + till := max(int64(bc+delta), int64(currentTill)) + res, _, err := c.depositNotary(ctx, amount, till) + return res, err } // DepositEndlessNotary calls notary deposit method. Unlike `DepositNotary`, @@ -171,12 +167,12 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin // This allows to avoid ValidAfterDeposit failures. // // This function must be invoked with notary enabled otherwise it throws panic. 
-func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, err error) { +func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() if c.inactive { - return util.Uint256{}, ErrConnectionLost + return util.Uint256{}, 0, ErrConnectionLost } if c.notary == nil { @@ -184,37 +180,37 @@ func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, e } // till value refers to a block height and it is uint32 value in neo-go - return c.depositNotary(amount, math.MaxUint32) + return c.depositNotary(ctx, amount, math.MaxUint32) } -func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint256, err error) { +func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, - c.notary.notary, + notary.Hash, big.NewInt(int64(amount)), []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { if !errors.Is(err, neorpc.ErrAlreadyExists) { - return util.Uint256{}, fmt.Errorf("can't make notary deposit: %w", err) + return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err) } // Transaction is already in mempool waiting to be processed. // This is an expected situation if we restart the service. - c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade, + c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), zap.Error(err)) - return util.Uint256{}, nil + return util.Uint256{}, 0, nil } - c.logger.Info(logs.ClientNotaryDepositInvoke, + c.logger.Info(ctx, logs.ClientNotaryDepositInvoke, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), zap.Stringer("tx_hash", txHash.Reverse())) - return txHash, nil + return txHash, vub, nil } // GetNotaryDeposit returns deposit of client's account in notary contract. @@ -235,18 +231,10 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) { sh := c.acc.PrivateKey().PublicKey().GetScriptHash() - items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh) + r := notary.NewReader(c.rpcActor) + bigIntDeposit, err := r.BalanceOf(sh) if err != nil { - return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err) - } - - if len(items) != 1 { - return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems)) - } - - bigIntDeposit, err := items[0].TryInteger() - if err != nil { - return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)) + return 0, fmt.Errorf("get notary deposit: %w", err) } return bigIntDeposit.Int64(), nil @@ -273,7 +261,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) { // committee multi signature. // // This function must be invoked with notary enabled otherwise it throws panic. 
-func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error { +func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -287,10 +275,11 @@ func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error { nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) if err != nil { - return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err) + return fmt.Errorf("calculate nonce and `validUntilBlock` values: %w", err) } return c.notaryInvokeAsCommittee( + ctx, setDesignateMethod, nonce, vub, @@ -321,7 +310,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) { // Requires committee multi signature. // // This function must be invoked with notary enabled otherwise it throws panic. -func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error { +func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -335,10 +324,11 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error { nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) if err != nil { - return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err) + return fmt.Errorf("calculate nonce and `validUntilBlock` values: %w", err) } return c.notaryInvokeAsCommittee( + ctx, setDesignateMethod, nonce, vub, @@ -354,19 +344,19 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error { // Returns valid until block value. // // `nonce` and `vub` are used only if notary is enabled. -func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { +func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() if c.inactive { - return 0, ErrConnectionLost + return InvokeRes{}, ErrConnectionLost } if c.notary == nil { - return c.Invoke(contract, fee, method, args...) + return c.Invoke(ctx, contract, fee, method, args...) } - return c.notaryInvoke(false, true, contract, nonce, vub, method, args...) + return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...) } // NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's @@ -374,19 +364,19 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui // not expected to be signed by the current node. // // Considered to be used by non-IR nodes. -func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) { +func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() if c.inactive { - return 0, ErrConnectionLost + return InvokeRes{}, ErrConnectionLost } if c.notary == nil { - return c.Invoke(contract, fee, method, args...) + return c.Invoke(ctx, contract, fee, method, args...) } - return c.notaryInvoke(false, false, contract, rand.Uint32(), vubP, method, args...) + return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...)
} // NotarySignAndInvokeTX signs and sends notary request that was received from @@ -403,7 +393,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { alphabetList, err := c.notary.alphabetSource() if err != nil { - return fmt.Errorf("could not fetch current alphabet keys: %w", err) + return fmt.Errorf("fetch current alphabet keys: %w", err) } cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList) @@ -428,7 +418,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { return err } - c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked, + c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked, zap.String("tx_hash", mainH.StringLE()), zap.Uint32("valid_until_block", untilActual), zap.String("fallback_hash", fbH.StringLE())) @@ -436,13 +426,13 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { return nil } -func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error { +func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error { designate := c.GetDesignateHash() - _, err := c.notaryInvoke(true, true, designate, nonce, &vub, method, args...) + _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...) return err } -func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { +func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) { start := time.Now() success := false defer func() { @@ -451,27 +441,27 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint alphabetList, err := c.notary.alphabetSource() if err != nil { - return 0, err + return InvokeRes{}, err } until, err := c.getUntilValue(vub) if err != nil { - return 0, err + return InvokeRes{}, err } cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee) if err != nil { - return 0, err + return InvokeRes{}, err } nAct, err := notary.NewActor(c.client, cosigners, c.acc) if err != nil { - return 0, err + return InvokeRes{}, err } mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { if r.State != vmstate.Halt.String() { - return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + return ¬HaltStateError{state: r.State, exception: r.FaultException} } t.ValidUntilBlock = until @@ -481,17 +471,17 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint }, args...)) if err != nil && !alreadyOnChainError(err) { - return 0, err + return InvokeRes{}, err } - c.logger.Debug(logs.ClientNotaryRequestInvoked, + c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked, zap.String("method", method), zap.Uint32("valid_until_block", untilActual), zap.String("tx_hash", mainH.StringLE()), zap.String("fallback_hash", fbH.StringLE())) success = true - return until, nil + return InvokeRes{Hash: mainH, VUB: until}, nil } func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) { @@ -525,24 +515,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet if ok { pub, err := 
keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256()) if err != nil { - return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err) + return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err) } acc = notary.FakeSimpleAccount(pub) } else { m, pubsBytes, ok := vm.ParseMultiSigContract(script) if !ok { - return nil, errors.New("failed to parse verification script of signer #2: unknown witness type") + return nil, errors.New("parse verification script of signer #2: unknown witness type") } pubs := make(keys.PublicKeys, len(pubsBytes)) for i := range pubs { pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256()) if err != nil { - return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err) + return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err) } } acc, err = notary.FakeMultisigAccount(m, pubs) if err != nil { - return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err) + return nil, fmt.Errorf("create fake account for signer #2: %w", err) } } } @@ -618,8 +608,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey()) err := multisigAccount.ConvertMultisig(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err) } } else { // alphabet multisig redeem script is @@ -627,8 +616,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB // inner ring multiaddress witness multisigAccount, err = notary.FakeMultisigAccount(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("make inner ring multisig wallet: %w", err) } } @@ -638,33 +626,15 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB func (c *Client) notaryTxValidationLimit() (uint32, error) { bc, err := c.rpcActor.GetBlockCount() if err != nil { - return 0, fmt.Errorf("can't get current blockchain height: %w", err) + return 0, fmt.Errorf("get current blockchain height: %w", err) } - min := bc + c.notary.txValidTime - rounded := (min/c.notary.roundTime + 1) * c.notary.roundTime + minTime := bc + c.notary.txValidTime + rounded := (minTime/c.notary.roundTime + 1) * c.notary.roundTime return rounded, nil } -func (c *Client) depositExpirationOf() (int64, error) { - expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash()) - if err != nil { - return 0, fmt.Errorf("can't invoke method: %w", err) - } - - if len(expirationRes) != 1 { - return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes)) - } - - currentTillBig, err := expirationRes[0].TryInteger() - if err != nil { - return 0, fmt.Errorf("can't parse deposit till value: %w", err) - } - - return currentTillBig.Int64(), nil -} - // sigCount returns the number of required signature. // For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT). // If committee is true, returns M as N/2+1. 
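// The quorum formulas this comment refers to, worked for N = 7 keys: alphabet
// invocations need M = N*2/3 + 1 = 5 signatures (dBFT-style), while committee
// designation needs M = N/2 + 1 = 4. As a sketch:
//
//	func quorum(n int, committee bool) int {
//		if committee {
//			return n/2 + 1
//		}
//		return n*2/3 + 1
//	}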
@@ -738,12 +708,12 @@ func alreadyOnChainError(err error) bool { func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) { notaryBalance, err := c.GetNotaryDeposit() if err != nil { - return 0, fmt.Errorf("could not get notary balance: %w", err) + return 0, fmt.Errorf("get notary balance: %w", err) } gasBalance, err := c.GasBalance() if err != nil { - return 0, fmt.Errorf("could not get GAS balance: %w", err) + return 0, fmt.Errorf("get GAS balance: %w", err) } if gasBalance == 0 { @@ -792,12 +762,12 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool) if hash != nil { height, err = c.getTransactionHeight(*hash) if err != nil { - return 0, 0, fmt.Errorf("could not get transaction height: %w", err) + return 0, 0, fmt.Errorf("get transaction height: %w", err) } } else { height, err = c.rpcActor.GetBlockCount() if err != nil { - return 0, 0, fmt.Errorf("could not get chain height: %w", err) + return 0, 0, fmt.Errorf("get chain height: %w", err) } } diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index dfcf62b83..c4eb120d2 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -1,8 +1,10 @@ package client import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -128,7 +130,8 @@ func (i *InvokePrmOptional) SetVUB(v uint32) { } type InvokeRes struct { - VUB uint32 + Hash util.Uint256 + VUB uint32 } // Invoke calls Invoke method of Client with static internal script hash and fee. @@ -140,9 +143,7 @@ type InvokeRes struct { // // If fee for the operation executed using specified method is customized, then StaticClient uses it. // Otherwise, default fee is used. -func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { - var res InvokeRes - var err error +func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) { var vubP *uint32 if s.tryNotary { if s.alpha { @@ -159,7 +160,7 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash) } if err != nil { - return InvokeRes{}, fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err) + return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err) } vubP = &vub @@ -169,25 +170,23 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) - return res, err + return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) } if prm.vub > 0 { vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, vubP, prm.method, prm.args...) - return res, err + return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...) } - res.VUB, err = s.client.Invoke( + return s.client.Invoke( + ctx, s.scScriptHash, s.fee, prm.method, prm.args..., ) - return res, err } // TestInvokePrm groups parameters of the TestInvoke operation. @@ -207,7 +206,9 @@ func (ti *TestInvokePrm) SetArgs(args ...any) { } // TestInvoke calls TestInvoke method of Client with static internal script hash. 
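// TestInvoke below now opens a tracing span whose name embeds the contract
// method, so traces distinguish Morph.TestInvoke.config from, say,
// Morph.TestInvoke.netmapCandidates. The derived context is discarded because
// the underlying client call is not yet context-aware:
//
//	_, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
//	defer span.End()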
-func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) { +func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) { + _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method) + defer span.End() return s.client.TestInvoke( s.scScriptHash, prm.method, diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go index cd55d6bd2..f7b6705a8 100644 --- a/pkg/morph/client/util.go +++ b/pkg/morph/client/util.go @@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) { case stackitem.IntegerT: n, err := param.TryInteger() if err != nil { - return nil, fmt.Errorf("can't parse integer bytes: %w", err) + return nil, fmt.Errorf("parse integer bytes: %w", err) } return n.Bytes(), nil @@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) { func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error { return func(r *result.Invoke, t *transaction.Transaction) error { if r.State != HaltState { - return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + return ¬HaltStateError{state: r.State, exception: r.FaultException} } t.SystemFee += add diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go new file mode 100644 index 000000000..87fcf84b8 --- /dev/null +++ b/pkg/morph/client/waiter.go @@ -0,0 +1,51 @@ +package client + +import ( + "context" + "fmt" + + "github.com/nspcc-dev/neo-go/pkg/neorpc/result" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter" + "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" +) + +type waiterClient struct { + c *Client +} + +func (w *waiterClient) Context() context.Context { + return context.Background() +} + +func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) { + return w.c.GetApplicationLog(hash, trig) +} + +func (w *waiterClient) GetBlockCount() (uint32, error) { + return w.c.BlockCount() +} + +func (w *waiterClient) GetVersion() (*result.Version, error) { + return w.c.GetVersion() +} + +// WaitTxHalt waits until transaction with the specified hash persists on the blockchain. +// It also checks execution result to finish in HALT state. +func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error { + w, err := waiter.NewPollingBased(&waiterClient{c: c}) + if err != nil { + return fmt.Errorf("create tx waiter: %w", err) + } + + res, err := w.WaitAny(ctx, vub, h) + if err != nil { + return fmt.Errorf("wait until tx persists: %w", err) + } + + if res.VMState.HasFlag(vmstate.Halt) { + return nil + } + return ¬HaltStateError{state: res.VMState.String(), exception: res.FaultException} +} diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go index 062a2a886..99f80584a 100644 --- a/pkg/morph/event/balance/lock.go +++ b/pkg/morph/event/balance/lock.go @@ -3,7 +3,7 @@ package balance import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -47,61 +47,17 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash } // ParseLock from notification into lock structure. 
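// ParseLock now leans on the generated binding: FromStackItem validates the
// five-field notification layout once, and the typed event fields replace the
// per-field stack parsing. Decode sketch mirroring the new implementation:
//
//	var le balance.LockEvent
//	if err := le.FromStackItem(e.Item); err != nil {
//		return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
//	}
//	amount := le.Amount.Int64() // the binding exposes *big.Int; Lock stores int64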
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Lock - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var le balance.LockEvent + if err := le.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse balance.LockEvent: %w", err) } - if ln := len(params); ln != 5 { - return nil, event.WrongNumberOfParameters(5, ln) - } - - // parse id - ev.id, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get lock id: %w", err) - } - - // parse user - user, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get lock user value: %w", err) - } - - ev.user, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err) - } - - // parse lock account - lock, err := client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get lock account value: %w", err) - } - - ev.lock, err = util.Uint160DecodeBytesBE(lock) - if err != nil { - return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err) - } - - // parse amount - ev.amount, err = client.IntFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get lock amount: %w", err) - } - - // parse until deadline - ev.until, err = client.IntFromStackItem(params[4]) - if err != nil { - return nil, fmt.Errorf("could not get lock deadline: %w", err) - } - - ev.txHash = e.Container - - return ev, nil + return Lock{ + id: le.TxID, + user: le.From, + lock: le.To, + amount: le.Amount.Int64(), + until: le.Until.Int64(), + txHash: e.Container, + }, nil } diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go index 9199bcd55..87b91aede 100644 --- a/pkg/morph/event/balance/lock_test.go +++ b/pkg/morph/event/balance/lock_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -28,7 +27,7 @@ func TestParseLock(t *testing.T) { } _, err := ParseLock(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong id parameter", func(t *testing.T) { diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go index a206307f8..d28f6d521 100644 --- a/pkg/morph/event/container/delete.go +++ b/pkg/morph/event/container/delete.go @@ -3,7 +3,7 @@ package container import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -58,28 +58,14 @@ func (DeleteSuccess) MorphEvent() {} // ParseDeleteSuccess decodes notification event thrown by Container contract into // DeleteSuccess and returns it as event.Event. 
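ParseDeleteSuccess below relies on the binding to hand over the container ID as a raw sha256 checksum, so the only remaining work is the conversion to the SDK type. In isolation, assuming the frostfs-sdk-go API:

```go
package example

import (
	"crypto/sha256"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// cidFromRaw turns the [32]byte checksum carried by DeleteSuccessEvent (and
// PutSuccessEvent later in this patch) into the SDK's container ID type.
func cidFromRaw(raw [sha256.Size]byte) cid.ID {
	var id cid.ID
	id.SetSHA256(raw)
	return id
}
```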
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array from raw notification event: %w", err) + var dse container.DeleteSuccessEvent + if err := dse.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err) } - const expectedItemNumDeleteSuccess = 1 - - if ln := len(items); ln != expectedItemNumDeleteSuccess { - return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln) - } - - binID, err := client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("parse container ID item: %w", err) - } - - var res DeleteSuccess - - err = res.ID.Decode(binID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - return res, nil + var cnr cid.ID + cnr.SetSHA256(dse.ContainerID) + return DeleteSuccess{ + ID: cnr, + }, nil } diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go index 627c5fcf5..62e7d7277 100644 --- a/pkg/morph/event/container/delete_test.go +++ b/pkg/morph/event/container/delete_test.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -18,7 +17,7 @@ func TestParseDeleteSuccess(t *testing.T) { } _, err := ParseDeleteSuccess(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong container parameter", func(t *testing.T) { diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go deleted file mode 100644 index 4168d8842..000000000 --- a/pkg/morph/event/container/eacl.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -// SetEACL represents structure of notification about -// modified eACL table coming from FrostFS Container contract. -type SetEACL struct { - TableValue []byte - SignatureValue []byte - PublicKeyValue []byte - TokenValue []byte - - // For notary notifications only. - // Contains raw transactions of notary request. - NotaryRequestValue *payload.P2PNotaryRequest -} - -// MorphEvent implements Neo:Morph Event interface. -func (SetEACL) MorphEvent() {} - -// Table returns returns eACL table in a binary FrostFS API format. -func (x SetEACL) Table() []byte { - return x.TableValue -} - -// Signature returns signature of the binary table. -func (x SetEACL) Signature() []byte { - return x.SignatureValue -} - -// PublicKey returns public keys of container -// owner in a binary format. -func (x SetEACL) PublicKey() []byte { - return x.PublicKeyValue -} - -// SessionToken returns binary token of the session -// within which the eACL was set. -func (x SetEACL) SessionToken() []byte { - return x.TokenValue -} - -// NotaryRequest returns raw notary request if notification -// was received via notary service. Otherwise, returns nil. 
-func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest { - return x.NotaryRequestValue -} - -const expectedItemNumEACL = 4 diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go deleted file mode 100644 index a4fe7c966..000000000 --- a/pkg/morph/event/container/eacl_notary.go +++ /dev/null @@ -1,75 +0,0 @@ -package container - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -func (x *SetEACL) setTable(v []byte) { - if v != nil { - x.TableValue = v - } -} - -func (x *SetEACL) setSignature(v []byte) { - if v != nil { - x.SignatureValue = v - } -} - -func (x *SetEACL) setPublicKey(v []byte) { - if v != nil { - x.PublicKeyValue = v - } -} - -func (x *SetEACL) setToken(v []byte) { - if v != nil { - x.TokenValue = v - } -} - -var setEACLFieldSetters = []func(*SetEACL, []byte){ - // order on stack is reversed - (*SetEACL).setToken, - (*SetEACL).setPublicKey, - (*SetEACL).setSignature, - (*SetEACL).setTable, -} - -const ( - // SetEACLNotaryEvent is method name for container EACL operations - // in `Container` contract. Is used as identificator for notary - // EACL changing requests. - SetEACLNotaryEvent = "setEACL" -) - -// ParseSetEACLNotary from NotaryEvent into container event structure. -func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) { - var ( - ev SetEACL - currentOp opcode.Opcode - ) - - fieldNum := 0 - - for _, op := range ne.Params() { - currentOp = op.Code() - - switch { - case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4: - if fieldNum == expectedItemNumEACL { - return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent) - } - - setEACLFieldSetters[fieldNum](&ev, op.Param()) - fieldNum++ - default: - return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code()) - } - } - - ev.NotaryRequestValue = ne.Raw() - - return ev, nil -} diff --git a/pkg/morph/event/container/estimates.go b/pkg/morph/event/container/estimates.go deleted file mode 100644 index 9fd21e2b5..000000000 --- a/pkg/morph/event/container/estimates.go +++ /dev/null @@ -1,78 +0,0 @@ -package container - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// StartEstimation structure of container.StartEstimation notification from -// morph chain. -type StartEstimation struct { - epoch uint64 -} - -// StopEstimation structure of container.StopEstimation notification from -// morph chain. -type StopEstimation struct { - epoch uint64 -} - -// MorphEvent implements Neo:Morph Event interface. -func (StartEstimation) MorphEvent() {} - -// MorphEvent implements Neo:Morph Event interface. -func (StopEstimation) MorphEvent() {} - -// Epoch returns epoch value for which to start container size estimation. -func (s StartEstimation) Epoch() uint64 { return s.epoch } - -// Epoch returns epoch value for which to stop container size estimation. -func (s StopEstimation) Epoch() uint64 { return s.epoch } - -// ParseStartEstimation from notification into container event structure. 
-func ParseStartEstimation(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - epoch, err := parseEstimation(params) - if err != nil { - return nil, err - } - - return StartEstimation{epoch: epoch}, nil -} - -// ParseStopEstimation from notification into container event structure. -func ParseStopEstimation(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - epoch, err := parseEstimation(params) - if err != nil { - return nil, err - } - - return StopEstimation{epoch: epoch}, nil -} - -func parseEstimation(params []stackitem.Item) (uint64, error) { - if ln := len(params); ln != 1 { - return 0, event.WrongNumberOfParameters(1, ln) - } - - // parse container - epoch, err := client.IntFromStackItem(params[0]) - if err != nil { - return 0, fmt.Errorf("could not get estimation epoch: %w", err) - } - - return uint64(epoch), nil -} diff --git a/pkg/morph/event/container/estimates_test.go b/pkg/morph/event/container/estimates_test.go deleted file mode 100644 index be46e62c4..000000000 --- a/pkg/morph/event/container/estimates_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package container - -import ( - "math/big" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestStartEstimation(t *testing.T) { - var epochNum uint64 = 100 - epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum)) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseStartEstimation(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - }) - - t.Run("wrong estimation parameter", func(t *testing.T) { - _, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{ - epochItem, - })) - - require.NoError(t, err) - - require.Equal(t, StartEstimation{ - epochNum, - }, ev) - }) -} - -func TestStopEstimation(t *testing.T) { - var epochNum uint64 = 100 - epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum)) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseStopEstimation(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - }) - - t.Run("wrong estimation parameter", func(t *testing.T) { - _, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{ - epochItem, - })) - - require.NoError(t, err) - - require.Equal(t, StopEstimation{ - epochNum, - }, ev) - }) -} diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go index 335034bf3..b09394ba4 100644 --- a/pkg/morph/event/container/put.go +++ b/pkg/morph/event/container/put.go @@ 
-3,7 +3,7 @@ package container import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -78,33 +78,14 @@ func (PutSuccess) MorphEvent() {} // ParsePutSuccess decodes notification event thrown by Container contract into // PutSuccess and returns it as event.Event. func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array from raw notification event: %w", err) + var pse container.PutSuccessEvent + if err := pse.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err) } - const expectedItemNumPutSuccess = 2 - - if ln := len(items); ln != expectedItemNumPutSuccess { - return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln) - } - - binID, err := client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("parse container ID item: %w", err) - } - - _, err = client.BytesFromStackItem(items[1]) - if err != nil { - return nil, fmt.Errorf("parse public key item: %w", err) - } - - var res PutSuccess - - err = res.ID.Decode(binID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - return res, nil + var cnr cid.ID + cnr.SetSHA256(pse.ContainerID) + return PutSuccess{ + ID: cnr, + }, nil } diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go index f5779ced6..6b2ee7b0a 100644 --- a/pkg/morph/event/container/put_notary.go +++ b/pkg/morph/event/container/put_notary.go @@ -46,7 +46,7 @@ const ( // put container requests. PutNotaryEvent = "put" - // PutNotaryEvent is an ID of notary "put named container" notification. + // PutNamedNotaryEvent is an ID of notary "put named container" notification. 
PutNamedNotaryEvent = "putNamed" ) diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go index 3622f9943..dd5c7ea93 100644 --- a/pkg/morph/event/container/put_test.go +++ b/pkg/morph/event/container/put_test.go @@ -4,8 +4,8 @@ import ( "crypto/sha256" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) { } _, err := ParsePutSuccess(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong container ID parameter", func(t *testing.T) { @@ -35,18 +35,30 @@ func TestParsePutSuccess(t *testing.T) { id.Encode(binID) t.Run("wrong public key parameter", func(t *testing.T) { - _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binID), - stackitem.NewMap(), - })) + t.Run("wrong type", func(t *testing.T) { + _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ + stackitem.NewByteArray(binID), + stackitem.NewMap(), + })) - require.Error(t, err) + require.Error(t, err) + }) + t.Run("garbage data", func(t *testing.T) { + _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ + stackitem.NewByteArray(binID), + stackitem.NewByteArray([]byte("key")), + })) + require.Error(t, err) + }) }) t.Run("correct behavior", func(t *testing.T) { + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ stackitem.NewByteArray(binID), - stackitem.NewByteArray([]byte("key")), + stackitem.NewByteArray(pk.PublicKey().Bytes()), })) require.NoError(t, err) diff --git a/pkg/morph/event/container/eacl_test.go b/pkg/morph/event/container/util_test.go similarity index 100% rename from pkg/morph/event/container/eacl_test.go rename to pkg/morph/event/container/util_test.go diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go index eae2a23f5..cf56464b8 100644 --- a/pkg/morph/event/frostfs/cheque.go +++ b/pkg/morph/event/frostfs/cheque.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -34,53 +34,20 @@ func (c Cheque) LockAccount() util.Uint160 { return c.LockValue } // ParseCheque from notification into cheque structure. 
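ParseCheque below shows the one spot where a binding still hands back raw bytes: ChequeEvent delivers User already decoded, but LockAccount must be converted manually, with the field name folded into the wrapped error. The conversion as a standalone sketch; the function name is illustrative:

```go
package example

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/util"
)

// decodeAccount converts a raw script-hash field of a binding event into a
// util.Uint160, wrapping failures with the event and field names the same
// way the hunk below does.
func decodeAccount(event, field string, raw []byte) (util.Uint160, error) {
	u, err := util.Uint160DecodeBytesBE(raw)
	if err != nil {
		return util.Uint160{}, fmt.Errorf("parse %s: field %s: %w", event, field, err)
	}
	return u, nil
}
```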
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Cheque - err error - ) + var ce frostfs.ChequeEvent + if err := ce.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err) + } - params, err := event.ParseStackArray(e) + lock, err := util.Uint160DecodeBytesBE(ce.LockAccount) if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err) } - if ln := len(params); ln != 4 { - return nil, event.WrongNumberOfParameters(4, ln) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get cheque id: %w", err) - } - - // parse user - user, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get cheque user: %w", err) - } - - ev.UserValue, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get cheque amount: %w", err) - } - - // parse lock account - lock, err := client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get cheque lock account: %w", err) - } - - ev.LockValue, err = util.Uint160DecodeBytesBE(lock) - if err != nil { - return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err) - } - - return ev, nil + return Cheque{ + IDValue: ce.Id, + AmountValue: ce.Amount.Int64(), + UserValue: ce.User, + LockValue: lock, + }, nil } diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go index ab177757f..d92b7922b 100644 --- a/pkg/morph/event/frostfs/cheque_test.go +++ b/pkg/morph/event/frostfs/cheque_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -27,7 +26,7 @@ func TestParseCheque(t *testing.T) { } _, err := ParseCheque(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong id parameter", func(t *testing.T) { diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go index 4c87634c2..805e80f3c 100644 --- a/pkg/morph/event/frostfs/config.go +++ b/pkg/morph/event/frostfs/config.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -36,39 +36,15 @@ func (u Config) Key() []byte { return u.KeyValue } func (u Config) Value() []byte { return u.ValueValue } func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Config - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var sce frostfs.SetConfigEvent + if err := sce.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err) } - if ln := len(params); ln 
!= 3 { - return nil, event.WrongNumberOfParameters(3, ln) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get config update id: %w", err) - } - - // parse key - ev.KeyValue, err = client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get config key: %w", err) - } - - // parse value - ev.ValueValue, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get config value: %w", err) - } - - ev.TxHashValue = e.Container - - return ev, nil + return Config{ + KeyValue: sce.Key, + ValueValue: sce.Value, + IDValue: sce.Id, + TxHashValue: e.Container, + }, nil } diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go index dcd4201e4..8acc8c15c 100644 --- a/pkg/morph/event/frostfs/config_test.go +++ b/pkg/morph/event/frostfs/config_test.go @@ -3,7 +3,6 @@ package frostfs import ( "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -21,7 +20,7 @@ func TestParseConfig(t *testing.T) { } _, err := ParseConfig(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong first parameter", func(t *testing.T) { diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go index d8a3b82f0..fcb01577e 100644 --- a/pkg/morph/event/frostfs/deposit.go +++ b/pkg/morph/event/frostfs/deposit.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -34,50 +34,15 @@ func (d Deposit) Amount() int64 { return d.AmountValue } // ParseDeposit notification into deposit structure. 
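ParseDeposit below identifies the deposit by its transaction hash. The binding exposes it as a util.Uint256 (a [32]byte array), while the legacy event API keeps a []byte ID, hence the slicing. As a sketch:

```go
package example

import "github.com/nspcc-dev/neo-go/pkg/util"

// legacyID reinterprets the binding's Uint256 transaction hash as the raw
// byte-slice ID that Deposit (and Withdraw further below) still expose.
func legacyID(txHash util.Uint256) []byte {
	return txHash[:]
}
```

This is also why the test fixture switches from an arbitrary string to a genuine 32-byte value.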
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) { - var ev Deposit - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var de frostfs.DepositEvent + if err := de.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err) } - if ln := len(params); ln != 4 { - return nil, event.WrongNumberOfParameters(4, ln) - } - - // parse from - from, err := client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get deposit sender: %w", err) - } - - ev.FromValue, err = util.Uint160DecodeBytesBE(from) - if err != nil { - return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get deposit amount: %w", err) - } - - // parse to - to, err := client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get deposit receiver: %w", err) - } - - ev.ToValue, err = util.Uint160DecodeBytesBE(to) - if err != nil { - return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get deposit id: %w", err) - } - - return ev, nil + return Deposit{ + IDValue: de.TxHash[:], + AmountValue: de.Amount.Int64(), + FromValue: de.From, + ToValue: de.Receiver, + }, nil } diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go index f279a7f9c..38d3e61f6 100644 --- a/pkg/morph/event/frostfs/deposit_test.go +++ b/pkg/morph/event/frostfs/deposit_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -12,7 +11,7 @@ import ( func TestParseDeposit(t *testing.T) { var ( - id = []byte("Hello World") + id = util.Uint256{0, 1, 2, 3} from = util.Uint160{0x1, 0x2, 0x3} to = util.Uint160{0x3, 0x2, 0x1} @@ -26,7 +25,7 @@ func TestParseDeposit(t *testing.T) { } _, err := ParseDeposit(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong from parameter", func(t *testing.T) { @@ -72,12 +71,12 @@ func TestParseDeposit(t *testing.T) { stackitem.NewByteArray(from.BytesBE()), stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), stackitem.NewByteArray(to.BytesBE()), - stackitem.NewByteArray(id), + stackitem.NewByteArray(id[:]), })) require.NoError(t, err) require.Equal(t, Deposit{ - IDValue: id, + IDValue: id[:], AmountValue: amount, FromValue: from, ToValue: to, diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go deleted file mode 100644 index 62203540f..000000000 --- a/pkg/morph/event/frostfs/ir_update.go +++ /dev/null @@ -1,54 +0,0 @@ -package frostfs - -import ( - "crypto/elliptic" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -type UpdateInnerRing struct { - keys []*keys.PublicKey -} - -// MorphEvent implements Neo:Morph Event interface. 
-func (UpdateInnerRing) MorphEvent() {} - -func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys } - -func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) { - var ( - ev UpdateInnerRing - err error - ) - - if ln := len(params); ln != 1 { - return nil, event.WrongNumberOfParameters(1, ln) - } - - // parse keys - irKeys, err := client.ArrayFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get updated inner ring keys: %w", err) - } - - ev.keys = make([]*keys.PublicKey, 0, len(irKeys)) - for i := range irKeys { - rawKey, err := client.BytesFromStackItem(irKeys[i]) - if err != nil { - return nil, fmt.Errorf("could not get updated inner ring public key: %w", err) - } - - key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err) - } - - ev.keys = append(ev.keys, key) - } - - return ev, nil -} diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go deleted file mode 100644 index fae87e5f9..000000000 --- a/pkg/morph/event/frostfs/ir_update_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package frostfs - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func genKey(t *testing.T) *keys.PrivateKey { - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - return priv -} - -func TestParseUpdateInnerRing(t *testing.T) { - publicKeys := []*keys.PublicKey{ - genKey(t).PublicKey(), - genKey(t).PublicKey(), - genKey(t).PublicKey(), - } - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseUpdateInnerRing(prms) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - }) - - t.Run("wrong first parameter", func(t *testing.T) { - _, err := ParseUpdateInnerRing([]stackitem.Item{ - stackitem.NewMap(), - }) - - require.Error(t, err) - }) - - t.Run("correct", func(t *testing.T) { - ev, err := ParseUpdateInnerRing([]stackitem.Item{ - stackitem.NewArray([]stackitem.Item{ - stackitem.NewByteArray(publicKeys[0].Bytes()), - stackitem.NewByteArray(publicKeys[1].Bytes()), - stackitem.NewByteArray(publicKeys[2].Bytes()), - }), - }) - require.NoError(t, err) - - require.Equal(t, UpdateInnerRing{ - keys: publicKeys, - }, ev) - }) -} diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go index f48067f86..2568b6512 100644 --- a/pkg/morph/event/frostfs/withdraw.go +++ b/pkg/morph/event/frostfs/withdraw.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -30,39 +30,14 @@ func (w Withdraw) Amount() int64 { return w.AmountValue } // ParseWithdraw notification into withdraw structure. 
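ParseWithdraw below maps frostfs.WithdrawEvent the same way. One subtlety: the bindings expose amounts as *big.Int, and Int64() silently truncates values outside the int64 range. The patch calls Int64() directly; a defensive variant, purely as an illustration, would be:

```go
package example

import (
	"fmt"
	"math/big"
)

// int64Amount converts a binding amount with an explicit overflow check,
// unlike the direct Int64() calls used by the parsers in this patch.
func int64Amount(a *big.Int) (int64, error) {
	if !a.IsInt64() {
		return 0, fmt.Errorf("amount %s overflows int64", a)
	}
	return a.Int64(), nil
}
```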
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) { - var ev Withdraw - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var we frostfs.WithdrawEvent + if err := we.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err) } - if ln := len(params); ln != 3 { - return nil, event.WrongNumberOfParameters(3, ln) - } - - // parse user - user, err := client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw user: %w", err) - } - - ev.UserValue, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw amount: %w", err) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw id: %w", err) - } - - return ev, nil + return Withdraw{ + IDValue: we.TxHash[:], + AmountValue: we.Amount.Int64(), + UserValue: we.User, + }, nil } diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go index 33435d19a..e382305e6 100644 --- a/pkg/morph/event/frostfs/withdraw_test.go +++ b/pkg/morph/event/frostfs/withdraw_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -12,7 +11,7 @@ import ( func TestParseWithdraw(t *testing.T) { var ( - id = []byte("Hello World") + id = util.Uint256{1, 2, 3} user = util.Uint160{0x1, 0x2, 0x3} amount int64 = 10 @@ -25,7 +24,7 @@ func TestParseWithdraw(t *testing.T) { } _, err := ParseWithdraw(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong user parameter", func(t *testing.T) { @@ -59,12 +58,12 @@ func TestParseWithdraw(t *testing.T) { ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{ stackitem.NewByteArray(user.BytesBE()), stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewByteArray(id), + stackitem.NewByteArray(id[:]), })) require.NoError(t, err) require.Equal(t, Withdraw{ - IDValue: id, + IDValue: id[:], AmountValue: amount, UserValue: user, }, ev) diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go index 182b4667e..55a514ff1 100644 --- a/pkg/morph/event/handlers.go +++ b/pkg/morph/event/handlers.go @@ -1,32 +1,26 @@ package event import ( + "context" + "github.com/nspcc-dev/neo-go/pkg/core/block" + "github.com/nspcc-dev/neo-go/pkg/util" ) // Handler is an Event processing function. -type Handler func(Event) +type Handler func(context.Context, Event) // BlockHandler is a chain block processing function. -type BlockHandler func(*block.Block) +type BlockHandler func(context.Context, *block.Block) // NotificationHandlerInfo is a structure that groups // the parameters of the handler of particular // contract event. type NotificationHandlerInfo struct { - scriptHashWithType - - h Handler -} - -// SetHandler is an event handler setter. -func (s *NotificationHandlerInfo) SetHandler(v Handler) { - s.h = v -} - -// Handler returns an event handler. 
-func (s NotificationHandlerInfo) Handler() Handler { - return s.h + Contract util.Uint160 + Type Type + Parser NotificationParser + Handlers []Handler } // NotaryHandlerInfo is a structure that groups diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index dd3c7d216..e5cdfeef7 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -33,13 +33,6 @@ type Listener interface { // it could not be started. ListenWithError(context.Context, chan<- error) - // SetNotificationParser must set the parser of particular contract event. - // - // Parser of each event must be set once. All parsers must be set before Listen call. - // - // Must ignore nil parsers and all calls after listener has been started. - SetNotificationParser(NotificationParserInfo) - // RegisterNotificationHandler must register the event handler for particular notification event of contract. // // The specified handler must be called after each capture and parsing of the event. @@ -100,8 +93,6 @@ type listener struct { startOnce, stopOnce sync.Once - started bool - notificationParsers map[scriptHashWithType]NotificationParser notificationHandlers map[scriptHashWithType][]Handler @@ -120,7 +111,7 @@ type listener struct { pool *ants.Pool } -const newListenerFailMsg = "could not instantiate Listener" +const newListenerFailMsg = "instantiate Listener" var ( errNilLogger = errors.New("nil logger") @@ -143,11 +134,8 @@ func (l *listener) Listen(ctx context.Context) { l.startOnce.Do(func() { l.wg.Add(1) defer l.wg.Done() - if err := l.listen(ctx, nil); err != nil { - l.log.Error(logs.EventCouldNotStartListenToEvents, - zap.String("error", err.Error()), - ) - } + + l.listen(ctx, nil) }) } @@ -161,26 +149,17 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { l.startOnce.Do(func() { l.wg.Add(1) defer l.wg.Done() - if err := l.listen(ctx, intError); err != nil { - l.log.Error(logs.EventCouldNotStartListenToEvents, - zap.String("error", err.Error()), - ) - l.sendError(ctx, intError, err) - } + + l.listen(ctx, intError) }) } -func (l *listener) listen(ctx context.Context, intError chan<- error) error { - // mark listener as started - l.started = true - +func (l *listener) listen(ctx context.Context, intError chan<- error) { subErrCh := make(chan error) go l.subscribe(subErrCh) l.listenLoop(ctx, intError, subErrCh) - - return nil } func (l *listener) subscribe(errCh chan error) { @@ -192,7 +171,7 @@ func (l *listener) subscribe(errCh chan error) { // fill the list with the contracts with set event parsers. l.mtx.RLock() for hashType := range l.notificationParsers { - scHash := hashType.ScriptHash() + scHash := hashType.Hash // prevent repetitions for _, hash := range hashes { @@ -201,26 +180,26 @@ func (l *listener) subscribe(errCh chan error) { } } - hashes = append(hashes, hashType.ScriptHash()) + hashes = append(hashes, hashType.Hash) } l.mtx.RUnlock() err := l.subscriber.SubscribeForNotification(hashes...) 
if err != nil { - errCh <- fmt.Errorf("could not subscribe for notifications: %w", err) + errCh <- fmt.Errorf("subscribe for notifications: %w", err) return } if len(l.blockHandlers) > 0 { if err = l.subscriber.BlockNotifications(); err != nil { - errCh <- fmt.Errorf("could not subscribe for blocks: %w", err) + errCh <- fmt.Errorf("subscribe for blocks: %w", err) return } } if l.listenNotary { if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil { - errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err) + errCh <- fmt.Errorf("subscribe for notary requests: %w", err) return } } @@ -234,7 +213,7 @@ func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error // in the same routine when shutting down node. select { case <-ctx.Done(): - l.log.Info(logs.EventStopEventListenerByContext, + l.log.Info(ctx, logs.EventStopEventListenerByContext, zap.String("reason", ctx.Err().Error()), ) return false @@ -251,81 +230,81 @@ loop: select { case err := <-subErrCh: if !l.sendError(ctx, intErr, err) { - l.log.Error(logs.EventStopEventListenerByError, zap.Error(err)) + l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err)) } break loop case <-ctx.Done(): - l.log.Info(logs.EventStopEventListenerByContext, + l.log.Info(ctx, logs.EventStopEventListenerByContext, zap.String("reason", ctx.Err().Error()), ) break loop case notifyEvent, ok := <-chs.NotificationsCh: if !ok { - l.log.Warn(logs.EventStopEventListenerByNotificationChannel) + l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel) l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated) break loop } else if notifyEvent == nil { - l.log.Warn(logs.EventNilNotificationEventWasCaught) + l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught) continue loop } - l.handleNotifyEvent(notifyEvent) + l.handleNotifyEvent(ctx, notifyEvent) case notaryEvent, ok := <-chs.NotaryRequestsCh: if !ok { - l.log.Warn(logs.EventStopEventListenerByNotaryChannel) + l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel) l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated) break loop } else if notaryEvent == nil { - l.log.Warn(logs.EventNilNotaryEventWasCaught) + l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught) continue loop } - l.handleNotaryEvent(notaryEvent) + l.handleNotaryEvent(ctx, notaryEvent) case b, ok := <-chs.BlockCh: if !ok { - l.log.Warn(logs.EventStopEventListenerByBlockChannel) + l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel) l.sendError(ctx, intErr, errBlockNotificationChannelClosed) break loop } else if b == nil { - l.log.Warn(logs.EventNilBlockWasCaught) + l.log.Warn(ctx, logs.EventNilBlockWasCaught) continue loop } - l.handleBlockEvent(b) + l.handleBlockEvent(ctx, b) } } } -func (l *listener) handleBlockEvent(b *block.Block) { +func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) { if err := l.pool.Submit(func() { for i := range l.blockHandlers { - l.blockHandlers[i](b) + l.blockHandlers[i](ctx, b) } }); err != nil { - l.log.Warn(logs.EventListenerWorkerPoolDrained, + l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } -func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) { +func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) { if err := l.pool.Submit(func() { - l.parseAndHandleNotary(notaryEvent) + l.parseAndHandleNotary(ctx, notaryEvent) }); err != nil { - 
l.log.Warn(logs.EventListenerWorkerPoolDrained, + l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } -func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) { +func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) { if err := l.pool.Submit(func() { - l.parseAndHandleNotification(notifyEvent) + l.parseAndHandleNotification(ctx, notifyEvent) }); err != nil { - l.log.Warn(logs.EventListenerWorkerPoolDrained, + l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } -func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) { +func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) { log := l.log.With( zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()), ) @@ -338,16 +317,14 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi ) // get the event parser - keyEvent := scriptHashWithType{} - keyEvent.SetScriptHash(notifyEvent.ScriptHash) - keyEvent.SetType(typEvent) + keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent} l.mtx.RLock() parser, ok := l.notificationParsers[keyEvent] l.mtx.RUnlock() if !ok { - log.Debug(logs.EventEventParserNotSet) + log.Debug(ctx, logs.EventEventParserNotSet) return } @@ -355,8 +332,8 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi // parse the notification event event, err := parser(notifyEvent) if err != nil { - log.Warn(logs.EventCouldNotParseNotificationEvent, - zap.String("error", err.Error()), + log.Warn(ctx, logs.EventCouldNotParseNotificationEvent, + zap.Error(err), ) return @@ -368,7 +345,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if len(handlers) == 0 { - log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, + log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) @@ -376,11 +353,11 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi } for _, handler := range handlers { - handler(event) + handler(ctx, event) } } -func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { +func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) { // prepare the notary event notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest) if err != nil { @@ -388,14 +365,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { switch { case errors.Is(err, ErrTXAlreadyHandled): case errors.As(err, &expErr): - l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent, - zap.String("error", err.Error()), + l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent, + zap.Error(err), zap.Uint32("current_block_height", expErr.CurrentBlockHeight), zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight), ) default: - l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent, - zap.String("error", err.Error()), + l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent, + zap.Error(err), ) } @@ -418,7 +395,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Debug(logs.EventNotaryParserNotSet) + log.Debug(ctx, logs.EventNotaryParserNotSet) return } @@ -426,8 +403,8 @@ func (l *listener) 
parseAndHandleNotary(nr *result.NotaryRequestEvent) { // parse the notary event event, err := parser(notaryEvent) if err != nil { - log.Warn(logs.EventCouldNotParseNotaryEvent, - zap.String("error", err.Error()), + log.Warn(ctx, logs.EventCouldNotParseNotaryEvent, + zap.Error(err), ) return @@ -439,47 +416,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, + log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) return } - handler(event) -} - -// SetNotificationParser sets the parser of particular contract event. -// -// Ignores nil and already set parsers. -// Ignores the parser if listener is started. -func (l *listener) SetNotificationParser(pi NotificationParserInfo) { - log := l.log.With( - zap.String("contract", pi.ScriptHash().StringLE()), - zap.Stringer("event_type", pi.getType()), - ) - - parser := pi.parser() - if parser == nil { - log.Info(logs.EventIgnoreNilEventParser) - return - } - - l.mtx.Lock() - defer l.mtx.Unlock() - - // check if the listener was started - if l.started { - log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser) - return - } - - // add event parser - if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok { - l.notificationParsers[pi.scriptHashWithType] = pi.parser() - } - - log.Debug(logs.EventRegisteredNewEventParser) + handler(ctx, event) } // RegisterNotificationHandler registers the handler for particular notification event of contract. @@ -488,35 +432,23 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { // Ignores handlers of event without parser. func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { log := l.log.With( - zap.String("contract", hi.ScriptHash().StringLE()), - zap.Stringer("event_type", hi.GetType()), + zap.String("contract", hi.Contract.StringLE()), + zap.Stringer("event_type", hi.Type), ) - handler := hi.Handler() - if handler == nil { - log.Warn(logs.EventIgnoreNilEventHandler) - return - } - // check if parser was set - l.mtx.RLock() - _, ok := l.notificationParsers[hi.scriptHashWithType] - l.mtx.RUnlock() - - if !ok { - log.Warn(logs.EventIgnoreHandlerOfEventWoParser) - return - } - - // add event handler l.mtx.Lock() - l.notificationHandlers[hi.scriptHashWithType] = append( - l.notificationHandlers[hi.scriptHashWithType], - hi.Handler(), - ) - l.mtx.Unlock() + defer l.mtx.Unlock() - log.Debug(logs.EventRegisteredNewEventHandler) + k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type} + + l.notificationParsers[k] = hi.Parser + l.notificationHandlers[k] = append( + l.notificationHandlers[k], + hi.Handlers..., + ) + + log.Debug(context.Background(), logs.EventRegisteredNewEventHandler) } // EnableNotarySupport enables notary request listening. 
Passed hash is @@ -555,27 +487,15 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { zap.Stringer("notary_type", pi.RequestType()), ) - parser := pi.parser() - if parser == nil { - log.Info(logs.EventIgnoreNilNotaryEventParser) - return - } - l.mtx.Lock() defer l.mtx.Unlock() - // check if the listener was started - if l.started { - log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser) - return - } - // add event parser if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok { l.notaryParsers[pi.notaryRequestTypes] = pi.parser() } - log.Info(logs.EventRegisteredNewEventParser) + log.Info(context.Background(), logs.EventRegisteredNewEventParser) } // RegisterNotaryHandler registers the handler for particular notification notary request event. @@ -593,19 +513,13 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { zap.Stringer("notary type", hi.RequestType()), ) - handler := hi.Handler() - if handler == nil { - log.Warn(logs.EventIgnoreNilNotaryEventHandler) - return - } - // check if parser was set l.mtx.RLock() _, ok := l.notaryParsers[hi.notaryRequestTypes] l.mtx.RUnlock() if !ok { - log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser) + log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser) return } @@ -614,7 +528,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler() l.mtx.Unlock() - log.Info(logs.EventRegisteredNewEventHandler) + log.Info(context.Background(), logs.EventRegisteredNewEventHandler) } // Stop closes subscription channel with remote neo node. @@ -627,11 +541,6 @@ func (l *listener) Stop() { } func (l *listener) RegisterBlockHandler(handler BlockHandler) { - if handler == nil { - l.log.Warn(logs.EventIgnoreNilBlockHandler) - return - } - l.blockHandlers = append(l.blockHandlers, handler) } @@ -648,7 +557,7 @@ func NewListener(p ListenerParams) (Listener, error) { // The default capacity is 0, which means "infinite". 
pool, err := ants.NewPool(p.WorkerPoolCapacity) if err != nil { - return nil, fmt.Errorf("could not init worker pool: %w", err) + return nil, fmt.Errorf("init worker pool: %w", err) } return &listener{ diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go index 5f7cf9f43..87f37305f 100644 --- a/pkg/morph/event/listener_test.go +++ b/pkg/morph/event/listener_test.go @@ -34,34 +34,24 @@ func TestEventHandling(t *testing.T) { blockHandled := make(chan bool) handledBlocks := make([]*block.Block, 0) - l.RegisterBlockHandler(func(b *block.Block) { + l.RegisterBlockHandler(func(_ context.Context, b *block.Block) { handledBlocks = append(handledBlocks, b) blockHandled <- true }) - key := scriptHashWithType{ - scriptHashValue: scriptHashValue{ - hash: util.Uint160{100}, - }, - typeValue: typeValue{ - typ: TypeFromString("notification type"), - }, - } - - l.SetNotificationParser(NotificationParserInfo{ - scriptHashWithType: key, - p: func(cne *state.ContainedNotificationEvent) (Event, error) { - return testNotificationEvent{source: cne}, nil - }, - }) - notificationHandled := make(chan bool) handledNotifications := make([]Event, 0) l.RegisterNotificationHandler(NotificationHandlerInfo{ - scriptHashWithType: key, - h: func(e Event) { - handledNotifications = append(handledNotifications, e) - notificationHandled <- true + Contract: util.Uint160{100}, + Type: TypeFromString("notification type"), + Parser: func(cne *state.ContainedNotificationEvent) (Event, error) { + return testNotificationEvent{source: cne}, nil + }, + Handlers: []Handler{ + func(_ context.Context, e Event) { + handledNotifications = append(handledNotifications, e) + notificationHandled <- true + }, }, }) @@ -137,7 +127,7 @@ func TestErrorPassing(t *testing.T) { WorkerPoolCapacity: 10, }) require.NoError(t, err, "failed to create listener") - l.RegisterBlockHandler(func(b *block.Block) {}) + l.RegisterBlockHandler(func(context.Context, *block.Block) {}) errCh := make(chan error) diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go index e454e2a6a..39c8f6237 100644 --- a/pkg/morph/event/netmap/epoch.go +++ b/pkg/morph/event/netmap/epoch.go @@ -1,9 +1,7 @@ package netmap import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -37,22 +35,13 @@ func (s NewEpoch) TxHash() util.Uint256 { // // Result is type of NewEpoch. 
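ParseNewEpoch below plugs into the listener through the consolidated NotificationHandlerInfo from earlier in this patch, which registers the parser and its handlers in a single call. A wiring sketch; the contract hash, event-type string, and handler body are illustrative:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
	netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

func registerNewEpoch(l event.Listener, netmapContract util.Uint160) {
	l.RegisterNotificationHandler(event.NotificationHandlerInfo{
		Contract: netmapContract,
		Type:     event.TypeFromString("NewEpoch"),
		Parser:   netmapEvent.ParseNewEpoch,
		Handlers: []event.Handler{
			func(ctx context.Context, e event.Event) {
				_ = e.(netmapEvent.NewEpoch) // typed event; handle it here
			},
		},
	})
}
```

Since parser and handlers travel together, the old SetNotificationParser ordering constraint (parser first, handlers second, everything before Listen) no longer exists.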
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != 1 { - return nil, event.WrongNumberOfParameters(1, ln) - } - - prmEpochNum, err := client.IntFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get integer epoch number: %w", err) + var nee netmap.NewEpochEvent + if err := nee.FromStackItem(e.Item); err != nil { + return nil, err } return NewEpoch{ - Num: uint64(prmEpochNum), + Num: nee.Epoch.Uint64(), Hash: e.Container, }, nil } diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go index bc267ecb6..6ff692327 100644 --- a/pkg/morph/event/netmap/epoch_test.go +++ b/pkg/morph/event/netmap/epoch_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -17,7 +16,7 @@ func TestParseNewEpoch(t *testing.T) { } _, err := ParseNewEpoch(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong first parameter type", func(t *testing.T) { diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go index 0260810b8..993182ab4 100644 --- a/pkg/morph/event/netmap/update_peer_notary.go +++ b/pkg/morph/event/netmap/update_peer_notary.go @@ -10,7 +10,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/vm/opcode" ) -var errNilPubKey = errors.New("could not parse public key: public key is nil") +var errNilPubKey = errors.New("public key is nil") func (s *UpdatePeer) setPublicKey(v []byte) (err error) { if v == nil { @@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) { s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256()) if err != nil { - return fmt.Errorf("could not parse public key: %w", err) + return fmt.Errorf("parse public key: %w", err) } return diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go index 37091f768..b11973646 100644 --- a/pkg/morph/event/notary_preparator.go +++ b/pkg/morph/event/notary_preparator.go @@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { for { opCode, param, err = ctx.Next() if err != nil { - return nil, fmt.Errorf("could not get next opcode in script: %w", err) + return nil, fmt.Errorf("get next opcode in script: %w", err) } if opCode == opcode.RET { @@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { // retrieve contract's script hash contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param) if err != nil { - return nil, fmt.Errorf("could not decode contract hash: %w", err) + return nil, fmt.Errorf("decode contract hash: %w", err) } // retrieve contract's method @@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { if len(args) != 0 { err = p.validateParameterOpcodes(args) if err != nil { - return nil, fmt.Errorf("could not validate arguments: %w", err) + return nil, fmt.Errorf("validate arguments: %w", err) } // without args packing opcodes @@ -199,14 +199,14 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { // neo-go API) // // this check prevents notary flow recursion - if 
!(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 || - bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version + if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 && + !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version return ErrTXAlreadyHandled } currentAlphabet, err := p.alphaKeys() if err != nil { - return fmt.Errorf("could not fetch Alphabet public keys: %w", err) + return fmt.Errorf("fetch Alphabet public keys: %w", err) } err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet) @@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error { argsLen, err := IntFromOpcode(ops[l-2]) if err != nil { - return fmt.Errorf("could not parse argument len: %w", err) + return fmt.Errorf("parse argument len: %w", err) } err = validateNestedArgs(argsLen, ops[:l-2]) @@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error { argsLen, err := IntFromOpcode(ops[i-1]) if err != nil { - return fmt.Errorf("could not parse argument len: %w", err) + return fmt.Errorf("parse argument len: %w", err) } expArgLen += argsLen + 1 @@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error { currBlock, err := p.blockCounter.BlockCount() if err != nil { - return fmt.Errorf("could not fetch current chain height: %w", err) + return fmt.Errorf("fetch current chain height: %w", err) } if currBlock >= nvb.Height { @@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) if err != nil { - return fmt.Errorf("could not get Alphabet verification script: %w", err) + return fmt.Errorf("get Alphabet verification script: %w", err) } if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) { @@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) if err != nil { - return fmt.Errorf("could not get Alphabet verification script: %w", err) + return fmt.Errorf("get Alphabet verification script: %w", err) } // the second one must be witness of the current @@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu // the last one must be a placeholder for notary contract witness last := len(w) - 1 - if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981 - bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version + if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981 + !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version len(w[last].VerificationScript) != 0 { return errIncorrectNotaryPlaceholder } diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go index 8da9d868a..60ddb4601 100644 --- a/pkg/morph/event/notary_preparator_test.go +++ b/pkg/morph/event/notary_preparator_test.go @@ -25,7 +25,7 @@ var ( alphaKeys keys.PublicKeys wrongAlphaKeys keys.PublicKeys - dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in + dummyAlphabetInvocationScript []byte 
dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...) @@ -439,7 +439,7 @@ func TestPrepare_CorrectNR(t *testing.T) { ) for _, test := range tests { - for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR + for i := range 1 { // run tests against 3 and 4 witness NR for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness additionalWitness := i == 0 nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness) diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go index 90eff0bd2..5adeb4b30 100644 --- a/pkg/morph/event/parsers.go +++ b/pkg/morph/event/parsers.go @@ -11,15 +11,6 @@ import ( // from the StackItem list. type NotificationParser func(*state.ContainedNotificationEvent) (Event, error) -// NotificationParserInfo is a structure that groups -// the parameters of particular contract -// notification event parser. -type NotificationParserInfo struct { - scriptHashWithType - - p NotificationParser -} - // NotaryPreparator constructs NotaryEvent // from the NotaryRequest event. type NotaryPreparator interface { @@ -47,24 +38,6 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) { n.p = p } -// SetParser is an event parser setter. -func (s *NotificationParserInfo) SetParser(v NotificationParser) { - s.p = v -} - -func (s NotificationParserInfo) parser() NotificationParser { - return s.p -} - -// SetType is an event type setter. -func (s *NotificationParserInfo) SetType(v Type) { - s.typ = v -} - -func (s NotificationParserInfo) getType() Type { - return s.typ -} - type wrongPrmNumber struct { exp, act int } diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go index 28c968046..b384e436b 100644 --- a/pkg/morph/event/rolemanagement/designate.go +++ b/pkg/morph/event/rolemanagement/designate.go @@ -26,7 +26,7 @@ func (Designate) MorphEvent() {} func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) { params, err := event.ParseStackArray(e) if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + return nil, fmt.Errorf("parse stack items from notify event: %w", err) } if len(params) != 2 { diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index f3b6443fb..0088be400 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -1,6 +1,7 @@ package event import ( + "context" "errors" "fmt" @@ -19,13 +20,9 @@ type scriptHashValue struct { hash util.Uint160 } -type typeValue struct { - typ Type -} - type scriptHashWithType struct { - scriptHashValue - typeValue + Hash util.Uint160 + Type Type } type notaryRequestTypes struct { @@ -72,25 +69,15 @@ func (s scriptHashValue) ScriptHash() util.Uint160 { return s.hash } -// SetType is an event type setter. -func (s *typeValue) SetType(v Type) { - s.typ = v -} - -// GetType is an event type getter. -func (s typeValue) GetType() Type { - return s.typ -} - // WorkerPoolHandler sets closure over worker pool w with passed handler h. 
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler { - return func(e Event) { + return func(ctx context.Context, e Event) { err := w.Submit(func() { - h(e) + h(ctx, e) }) if err != nil { - log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool, - zap.String("error", err.Error()), + log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool, + zap.Error(err), ) } } diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go index ee5466a7d..4ef59ed6a 100644 --- a/pkg/morph/subscriber/subscriber.go +++ b/pkg/morph/subscriber/subscriber.go @@ -245,16 +245,16 @@ routeloop: } func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool { - s.log.Info(logs.RPConnectionLost) + s.log.Info(ctx, logs.RPConnectionLost) if !s.client.SwitchRPC(ctx) { - s.log.Error(logs.RPCNodeSwitchFailure) + s.log.Error(ctx, logs.RPCNodeSwitchFailure) return false } s.Lock() chs := newSubChannels() go func() { - finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan) + finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan) }() s.current = chs s.Unlock() @@ -295,7 +295,7 @@ drainloop: // restoreSubscriptions restores subscriptions according to // cached information about them. -func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent, +func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent, blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent, ) bool { var err error @@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific if s.subscribedToNewBlocks { _, err = s.client.ReceiveBlocks(blCh) if err != nil { - s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } @@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific for contract := range s.subscribedEvents { _, err = s.client.ReceiveExecutionNotifications(contract, notifCh) if err != nil { - s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } @@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific for signer := range s.subscribedNotaryEvents { _, err = s.client.ReceiveNotaryRequests(signer, notaryCh) if err != nil { - s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go index be20d3571..974be1120 100644 --- a/pkg/morph/timer/block.go +++ b/pkg/morph/timer/block.go @@ -15,41 +15,19 @@ type BlockTickHandler func() // It can tick the blocks and perform certain actions // on block time intervals. type BlockTimer struct { - rolledBack bool - mtx sync.Mutex dur BlockMeter baseDur uint32 - mul, div uint32 - cur, tgt uint32 last uint32 h BlockTickHandler - ps []BlockTimer - once bool - - deltaCfg -} - -// DeltaOption is an option of delta-interval handler. 
-type DeltaOption func(*deltaCfg) - -type deltaCfg struct { - pulse bool -} - -// WithPulse returns option to call delta-interval handler multiple times. -func WithPulse() DeltaOption { - return func(c *deltaCfg) { - c.pulse = true - } } // StaticBlockMeter returns BlockMeters that always returns (d, nil). @@ -65,52 +43,19 @@ func StaticBlockMeter(d uint32) BlockMeter { func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer { return &BlockTimer{ dur: dur, - mul: 1, - div: 1, h: h, - deltaCfg: deltaCfg{ - pulse: true, - }, } } // NewOneTickTimer creates a new BlockTimer that ticks only once. -// -// Do not use delta handlers with pulse in this timer. func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer { return &BlockTimer{ dur: dur, - mul: 1, - div: 1, h: h, once: true, } } -// OnDelta registers handler which is executed on (mul / div * BlockMeter()) block -// after basic interval reset. -// -// If WithPulse option is provided, handler is executed (mul / div * BlockMeter()) block -// during base interval. -func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) { - c := deltaCfg{ - pulse: false, - } - - for i := range opts { - opts[i](&c) - } - - t.ps = append(t.ps, BlockTimer{ - mul: mul, - div: div, - h: h, - once: t.once, - - deltaCfg: c, - }) -} - // Reset resets previous ticks of the BlockTimer. // // Returns BlockMeter's error upon occurrence. @@ -124,29 +69,18 @@ func (t *BlockTimer) Reset() error { t.resetWithBaseInterval(d) - for i := range t.ps { - t.ps[i].resetWithBaseInterval(d) - } - t.mtx.Unlock() return nil } func (t *BlockTimer) resetWithBaseInterval(d uint32) { - t.rolledBack = false t.baseDur = d t.reset() } func (t *BlockTimer) reset() { - mul, div := t.mul, t.div - - if !t.pulse && t.rolledBack && mul < div { - mul, div = 1, 1 - } - - delta := mul * t.baseDur / div + delta := t.baseDur if delta == 0 { delta = 1 } @@ -180,12 +114,7 @@ func (t *BlockTimer) tick(h uint32) { if !t.once { t.cur = 0 - t.rolledBack = true t.reset() } } - - for i := range t.ps { - t.ps[i].tick(h) - } } diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go index 93bb04de5..a144b3db6 100644 --- a/pkg/morph/timer/block_test.go +++ b/pkg/morph/timer/block_test.go @@ -1,6 +1,7 @@ package timer_test import ( + "errors" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" @@ -8,7 +9,7 @@ import ( ) func tickN(t *timer.BlockTimer, n uint32) { - for i := uint32(0); i < n; i++ { + for range n { t.Tick(0) } } @@ -17,7 +18,7 @@ func tickN(t *timer.BlockTimer, n uint32) { // "resetting" consists of ticking the current height as well and invoking `Reset`. 
func TestIRBlockTimer_Reset(t *testing.T) { var baseCounter [2]int - blockDur := uint32(3) + const blockDur = uint32(3) bt1 := timer.NewBlockTimer( func() (uint32, error) { return blockDur, nil }, @@ -48,8 +49,40 @@ func TestIRBlockTimer_Reset(t *testing.T) { require.Equal(t, baseCounter[0], baseCounter[1]) } +func TestBlockTimer_ResetChangeDuration(t *testing.T) { + var dur uint32 = 2 + var err error + var counter int + + bt := timer.NewBlockTimer( + func() (uint32, error) { return dur, err }, + func() { counter++ }) + + require.NoError(t, bt.Reset()) + + tickN(bt, 2) + require.Equal(t, 1, counter) + + t.Run("return error", func(t *testing.T) { + dur = 5 + err = errors.New("my awesome error") + require.ErrorIs(t, bt.Reset(), err) + + tickN(bt, 2) + require.Equal(t, 2, counter) + }) + t.Run("change duration", func(t *testing.T) { + dur = 5 + err = nil + require.NoError(t, bt.Reset()) + + tickN(bt, 5) + require.Equal(t, 3, counter) + }) +} + func TestBlockTimer(t *testing.T) { - blockDur := uint32(10) + const blockDur = uint32(10) baseCallCounter := uint32(0) bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() { @@ -63,85 +96,6 @@ func TestBlockTimer(t *testing.T) { tickN(bt, intervalNum*blockDur) require.Equal(t, intervalNum, uint32(baseCallCounter)) - - // add half-interval handler - halfCallCounter := uint32(0) - - bt.OnDelta(1, 2, func() { - halfCallCounter++ - }) - - // add double interval handler - doubleCallCounter := uint32(0) - - bt.OnDelta(2, 1, func() { - doubleCallCounter++ - }) - - require.NoError(t, bt.Reset()) - - baseCallCounter = 0 - intervalNum = 20 - - tickN(bt, intervalNum*blockDur) - - require.Equal(t, intervalNum, uint32(halfCallCounter)) - require.Equal(t, intervalNum, uint32(baseCallCounter)) - require.Equal(t, intervalNum/2, uint32(doubleCallCounter)) -} - -func TestDeltaPulse(t *testing.T) { - blockDur := uint32(9) - baseCallCounter := uint32(0) - - bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - - deltaCallCounter := uint32(0) - - div := uint32(3) - - bt.OnDelta(1, div, func() { - deltaCallCounter++ - }, timer.WithPulse()) - - require.NoError(t, bt.Reset()) - - intervalNum := uint32(7) - - tickN(bt, intervalNum*blockDur) - - require.Equal(t, intervalNum, uint32(baseCallCounter)) - require.Equal(t, intervalNum*div, uint32(deltaCallCounter)) -} - -func TestDeltaReset(t *testing.T) { - blockDur := uint32(6) - baseCallCounter := 0 - - bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - - detlaCallCounter := 0 - - bt.OnDelta(1, 3, func() { - detlaCallCounter++ - }) - - require.NoError(t, bt.Reset()) - - tickN(bt, 6) - - require.Equal(t, 1, baseCallCounter) - require.Equal(t, 1, detlaCallCounter) - - require.NoError(t, bt.Reset()) - - tickN(bt, 3) - - require.Equal(t, 2, detlaCallCounter) } func TestNewOneTickTimer(t *testing.T) { @@ -168,82 +122,51 @@ func TestNewOneTickTimer(t *testing.T) { tickN(bt, 10) require.Equal(t, 1, baseCallCounter) }) - - t.Run("delta without pulse", func(t *testing.T) { - blockDur = uint32(10) - baseCallCounter = 0 - - bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - - detlaCallCounter := 0 - - bt.OnDelta(1, 10, func() { - detlaCallCounter++ - }) - - require.NoError(t, bt.Reset()) - - tickN(bt, 10) - require.Equal(t, 1, baseCallCounter) - require.Equal(t, 1, detlaCallCounter) - - tickN(bt, 10) // 10 more ticks must not affect counters - require.Equal(t, 1, baseCallCounter) - 
require.Equal(t, 1, detlaCallCounter) - }) } func TestBlockTimer_TickSameHeight(t *testing.T) { - var baseCounter, deltaCounter int + var baseCounter int blockDur := uint32(2) bt := timer.NewBlockTimer( func() (uint32, error) { return blockDur, nil }, func() { baseCounter++ }) - bt.OnDelta(2, 1, func() { - deltaCounter++ - }) require.NoError(t, bt.Reset()) - check := func(t *testing.T, h uint32, base, delta int) { - for i := 0; i < 2*int(blockDur); i++ { + check := func(t *testing.T, h uint32, base int) { + for range 2 * int(blockDur) { bt.Tick(h) require.Equal(t, base, baseCounter) - require.Equal(t, delta, deltaCounter) } } - check(t, 1, 0, 0) - check(t, 2, 1, 0) - check(t, 3, 1, 0) - check(t, 4, 2, 1) + check(t, 1, 0) + check(t, 2, 1) + check(t, 3, 1) + check(t, 4, 2) t.Run("works the same way after `Reset()`", func(t *testing.T) { t.Run("same block duration", func(t *testing.T) { require.NoError(t, bt.Reset()) baseCounter = 0 - deltaCounter = 0 - check(t, 1, 0, 0) - check(t, 2, 1, 0) - check(t, 3, 1, 0) - check(t, 4, 2, 1) + check(t, 1, 0) + check(t, 2, 1) + check(t, 3, 1) + check(t, 4, 2) }) t.Run("different block duration", func(t *testing.T) { blockDur = 3 require.NoError(t, bt.Reset()) baseCounter = 0 - deltaCounter = 0 - check(t, 1, 0, 0) - check(t, 2, 0, 0) - check(t, 3, 1, 0) - check(t, 4, 1, 0) - check(t, 5, 1, 0) - check(t, 6, 2, 1) + check(t, 1, 0) + check(t, 2, 0) + check(t, 3, 1) + check(t, 4, 1) + check(t, 5, 1) + check(t, 6, 2) }) }) } diff --git a/pkg/network/address.go b/pkg/network/address.go index 88f4a571d..4643eef15 100644 --- a/pkg/network/address.go +++ b/pkg/network/address.go @@ -2,12 +2,12 @@ package network import ( "errors" - "fmt" "net" "net/url" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) @@ -44,11 +44,9 @@ func (a Address) equal(addr Address) bool { // See also FromString. 
func (a Address) URIAddr() string { _, host, err := manet.DialArgs(a.ma) - if err != nil { - // the only correct way to construct Address is AddressFromString - // which makes this error appear unexpected - panic(fmt.Errorf("could not get host addr: %w", err)) - } + // the only correct way to construct Address is AddressFromString + // which makes this error appear unexpected + assert.NoError(err, "could not get host addr") if !a.IsTLSEnabled() { return host diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go index 371d3c76f..63ae0bfdb 100644 --- a/pkg/network/cache/client.go +++ b/pkg/network/cache/client.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" ) @@ -25,6 +26,7 @@ type ( Key *ecdsa.PrivateKey ResponseCallback func(client.ResponseMetaInfo) error AllowExternal bool + DialerSource *net.DialerSource } ) diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index f19510d76..54c1e18fb 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -7,11 +7,13 @@ import ( "sync" "time" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "google.golang.org/grpc" @@ -60,18 +62,26 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address prmInit.Key = *x.opts.Key } + grpcOpts := []grpc.DialOption{ + grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), + metrics.NewUnaryClientInterceptor(), + tracing.NewUnaryClientInterceptor(), + tagging.NewUnaryClientInterceptor(), + ), + grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), + metrics.NewStreamClientInterceptor(), + tracing.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), + ), + grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + } + prmDial := client.PrmDial{ - Endpoint: addr.URIAddr(), - GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), - ), - grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), - tracing.NewStreamClientInterceptor(), - ), - }, + Endpoint: addr.URIAddr(), + GRPCDialOptions: grpcOpts, } if x.opts.DialTimeout > 0 { prmDial.DialTimeout = x.opts.DialTimeout @@ -151,7 +161,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie group.IterateAddresses(func(addr network.Address) bool { select { case <-ctx.Done(): - firstErr = context.Canceled + firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled) return true default: } @@ -169,15 +179,16 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie var siErr *objectSDK.SplitInfoError var eiErr 
*objectSDK.ECInfoError + if err != nil { + err = fmt.Errorf("client connection error at %v: %w", addr, err) + x.ReportError(err) + } + success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr) if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) { firstErr = err } - if err != nil { - x.ReportError(err) - } - return success }) @@ -239,15 +250,6 @@ func (x *multiClient) ObjectPutSingle(ctx context.Context, p client.PrmObjectPut return } -func (x *multiClient) ContainerAnnounceUsedSpace(ctx context.Context, prm client.PrmAnnounceSpace) (res *client.ResAnnounceSpace, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ContainerAnnounceUsedSpace(ctx, prm) - return err - }) - - return -} - func (x *multiClient) ObjectDelete(ctx context.Context, p client.PrmObjectDelete) (res *client.ResObjectDelete, err error) { err = x.iterateClients(ctx, func(c clientcore.Client) error { res, err = c.ObjectDelete(ctx, p) diff --git a/pkg/network/group.go b/pkg/network/group.go index a6de0653e..0044fb2d4 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -3,6 +3,8 @@ package network import ( "errors" "fmt" + "iter" + "slices" "sort" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -67,11 +69,10 @@ func (x AddressGroup) Swap(i, j int) { // MultiAddressIterator is an interface of network address group. type MultiAddressIterator interface { - // Must iterate over network addresses and pass each one - // to the handler until it returns true. - IterateAddresses(func(string) bool) + // Addresses must return an iterator over network addresses. + Addresses() iter.Seq[string] - // Must return number of addresses in group. + // NumberOfAddresses must return number of addresses in group. NumberOfAddresses() int } @@ -130,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error { // iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f // until 1st parsing failure or f's error. func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) { - iter.IterateAddresses(func(s string) bool { + for s := range iter.Addresses() { var a Address err = a.FromString(s) if err != nil { - err = fmt.Errorf("could not parse address from string: %w", err) - return true + return fmt.Errorf("could not parse address from string: %w", err) } err = f(a) - - return err != nil - }) + if err != nil { + return err + } + } return } @@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) { // at least one common address. 
func (x AddressGroup) Intersects(x2 AddressGroup) bool { for i := range x { - for j := range x2 { - if x[i].equal(x2[j]) { - return true - } + if slices.ContainsFunc(x2, x[i].equal) { + return true } } diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go index 5b335fa52..d08264533 100644 --- a/pkg/network/group_test.go +++ b/pkg/network/group_test.go @@ -1,6 +1,8 @@ package network import ( + "iter" + "slices" "sort" "testing" @@ -58,10 +60,8 @@ func TestAddressGroup_FromIterator(t *testing.T) { type testIterator []string -func (t testIterator) IterateAddresses(f func(string) bool) { - for i := range t { - f(t[i]) - } +func (t testIterator) Addresses() iter.Seq[string] { + return slices.Values(t) } func (t testIterator) NumberOfAddresses() int { diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go index 6c352484b..14729f4c2 100644 --- a/pkg/network/tls_test.go +++ b/pkg/network/tls_test.go @@ -37,7 +37,7 @@ func BenchmarkAddressTLSEnabled(b *testing.B) { b.ReportAllocs() var enabled bool - for i := 0; i < b.N; i++ { + for range b.N { enabled = addr.IsTLSEnabled() } require.True(b, enabled) diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go index 2144a3001..78129bfbe 100644 --- a/pkg/network/transport/accounting/grpc/service.go +++ b/pkg/network/transport/accounting/grpc/service.go @@ -3,9 +3,9 @@ package accounting import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" - accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc" accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" + accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc" ) // Server wraps FrostFS API Accounting service and diff --git a/pkg/network/transport/apemanager/grpc/service.go b/pkg/network/transport/apemanager/grpc/service.go index 59783cfc0..850d38a65 100644 --- a/pkg/network/transport/apemanager/grpc/service.go +++ b/pkg/network/transport/apemanager/grpc/service.go @@ -3,9 +3,9 @@ package apemanager import ( "context" - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" - apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc" apemanager_svc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager" + apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" + apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" ) type Server struct { diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go index ed514d6d4..8cbf8d9c3 100644 --- a/pkg/network/transport/container/grpc/service.go +++ b/pkg/network/transport/container/grpc/service.go @@ -3,9 +3,9 @@ package container import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" containersvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" ) // Server wraps FrostFS API Container service and @@ -81,47 +81,25 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil } -// SetExtendedACL converts gRPC 
SetExtendedACLRequest message and passes it to internal Container service. -func (s *Server) SetExtendedACL(ctx context.Context, req *containerGRPC.SetExtendedACLRequest) (*containerGRPC.SetExtendedACLResponse, error) { - setEACLReq := new(container.SetExtendedACLRequest) - if err := setEACLReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.SetExtendedACL(ctx, setEACLReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*containerGRPC.SetExtendedACLResponse), nil +type containerStreamerV2 struct { + containerGRPC.ContainerService_ListStreamServer } -// GetExtendedACL converts gRPC GetExtendedACLRequest message and passes it to internal Container service. -func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExtendedACLRequest) (*containerGRPC.GetExtendedACLResponse, error) { - getEACLReq := new(container.GetExtendedACLRequest) - if err := getEACLReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.GetExtendedACL(ctx, getEACLReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*containerGRPC.GetExtendedACLResponse), nil +func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error { + return s.ContainerService_ListStreamServer.Send( + resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse), + ) } -// AnnounceUsedSpace converts gRPC AnnounceUsedSpaceRequest message and passes it to internal Container service. -func (s *Server) AnnounceUsedSpace(ctx context.Context, req *containerGRPC.AnnounceUsedSpaceRequest) (*containerGRPC.AnnounceUsedSpaceResponse, error) { - announceReq := new(container.AnnounceUsedSpaceRequest) - if err := announceReq.FromGRPCMessage(req); err != nil { - return nil, err +// ListStream converts gRPC ListStreamRequest message and relays the data read from the server-side stream +// to the gRPC stream.
+func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error { + listReq := new(container.ListStreamRequest) + if err := listReq.FromGRPCMessage(req); err != nil { + return err } - resp, err := s.srv.AnnounceUsedSpace(ctx, announceReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*containerGRPC.AnnounceUsedSpaceResponse), nil + return s.srv.ListStream(listReq, &containerStreamerV2{ + ContainerService_ListStreamServer: gStream, + }) } diff --git a/pkg/network/transport/netmap/grpc/service.go b/pkg/network/transport/netmap/grpc/service.go index 406c77e58..4bc3a42f8 100644 --- a/pkg/network/transport/netmap/grpc/service.go +++ b/pkg/network/transport/netmap/grpc/service.go @@ -3,9 +3,9 @@ package grpc import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" - netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc" netmapsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" + netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc" ) // Server wraps FrostFS API Netmap service and diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go index e1655c183..655b1f9fb 100644 --- a/pkg/network/transport/object/grpc/get.go +++ b/pkg/network/transport/object/grpc/get.go @@ -1,8 +1,8 @@ package object import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) type getStreamerV2 struct { diff --git a/pkg/network/transport/object/grpc/range.go b/pkg/network/transport/object/grpc/range.go index 391536e8e..7d7ce0e4c 100644 --- a/pkg/network/transport/object/grpc/range.go +++ b/pkg/network/transport/object/grpc/range.go @@ -1,8 +1,8 @@ package object import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) type getRangeStreamerV2 struct { diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go index a151ced09..8432707f7 100644 --- a/pkg/network/transport/object/grpc/search.go +++ b/pkg/network/transport/object/grpc/search.go @@ -1,8 +1,8 @@ package object import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) type searchStreamerV2 struct { diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go index 7c6b395d5..15dacd553 100644 --- a/pkg/network/transport/object/grpc/service.go +++ b/pkg/network/transport/object/grpc/service.go @@ -5,10 +5,10 @@ import ( "errors" "io" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" 
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) // Server wraps FrostFS API Object service and @@ -24,9 +24,51 @@ func New(c objectSvc.ServiceServer) *Server { } } +// Patch opens an internal Object patch stream and feeds it with the data read from the gRPC stream. +func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error { + stream, err := s.srv.Patch(gStream.Context()) + if err != nil { + return err + } + + for { + req, err := gStream.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + resp, err := stream.CloseAndRecv(gStream.Context()) + if err != nil { + return err + } + + return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse)) + } + + return err + } + + patchReq := new(object.PatchRequest) + if err := patchReq.FromGRPCMessage(req); err != nil { + return err + } + + if err := stream.Send(gStream.Context(), patchReq); err != nil { + if errors.Is(err, util.ErrAbortStream) { + resp, err := stream.CloseAndRecv(gStream.Context()) + if err != nil { + return err + } + + return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse)) + } + + return err + } + } +} + // Put opens internal Object service Put stream and overtakes data from gRPC stream to it. func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error { - stream, err := s.srv.Put() + stream, err := s.srv.Put(gStream.Context()) if err != nil { return err } diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go index e0dc74942..6fce397f3 100644 --- a/pkg/network/transport/session/grpc/service.go +++ b/pkg/network/transport/session/grpc/service.go @@ -3,9 +3,9 @@ package session import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc" sessionsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" ) // Server wraps FrostFS API Session service and diff --git a/pkg/network/validation.go b/pkg/network/validation.go index 92f650119..b5157f28f 100644 --- a/pkg/network/validation.go +++ b/pkg/network/validation.go @@ -2,6 +2,7 @@ package network import ( "errors" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -34,8 +35,8 @@ var ( // MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo -func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) +func (x NodeEndpointsIterator) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() } func (x NodeEndpointsIterator) NumberOfAddresses() int { diff --git a/pkg/services/accounting/executor.go b/pkg/services/accounting/executor.go index b0722cf8a..93e44c52b 100644 --- a/pkg/services/accounting/executor.go +++ b/pkg/services/accounting/executor.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" ) type ServiceExecutor interface { diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go index ac836b71d..6c2df8428 100644 --- a/pkg/services/accounting/morph/executor.go +++ b/pkg/services/accounting/morph/executor.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" accountingSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) @@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor { } } -func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { +func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errors.New("missing account") @@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceReque return nil, fmt.Errorf("invalid account: %w", err) } - amount, err := s.client.BalanceOf(id) + amount, err := s.client.BalanceOf(ctx, id) if err != nil { return nil, err } - balancePrecision, err := s.client.Decimals() + balancePrecision, err := s.client.Decimals(ctx) if err != nil { return nil, err } diff --git a/pkg/services/accounting/server.go b/pkg/services/accounting/server.go index 72833c46c..a280416fb 100644 --- a/pkg/services/accounting/server.go +++ b/pkg/services/accounting/server.go @@ -3,7 +3,7 @@ package accounting import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" ) // Server is an interface of the FrostFS API Accounting service server. 
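Aside: the accounting executor change above threads the request context into both chain reads (client.BalanceOf(ctx, id) and client.Decimals(ctx)) before scaling the raw amount by the fetched precision. Below is a minimal, self-contained sketch of that pattern; chainReader, staticReader and balance are illustrative stand-ins, not frostfs-node or SDK APIs.

```go
package main

import (
	"context"
	"fmt"
	"math/big"
)

// chainReader is a hypothetical stand-in for the morph balance client:
// after the refactor both reads take a context, so cancelling the request
// aborts the underlying chain RPC instead of letting it run on.
type chainReader interface {
	BalanceOf(ctx context.Context, account string) (*big.Int, error)
	Decimals(ctx context.Context) (uint32, error)
}

// balance mirrors the executor's flow: fetch the raw fixed-point amount
// and its precision within one request-scoped context, then scale.
func balance(ctx context.Context, c chainReader, account string) (*big.Float, error) {
	amount, err := c.BalanceOf(ctx, account)
	if err != nil {
		return nil, fmt.Errorf("balance of %s: %w", account, err)
	}
	prec, err := c.Decimals(ctx)
	if err != nil {
		return nil, fmt.Errorf("decimals: %w", err)
	}
	// The contract stores balances as integers scaled by 10^precision.
	scale := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(prec)), nil)
	return new(big.Float).Quo(
		new(big.Float).SetInt(amount),
		new(big.Float).SetInt(scale),
	), nil
}

// staticReader is a fixed-value fake used only to exercise balance().
type staticReader struct{}

func (staticReader) BalanceOf(context.Context, string) (*big.Int, error) {
	return big.NewInt(12_345_678), nil
}

func (staticReader) Decimals(context.Context) (uint32, error) { return 8, nil }

func main() {
	b, err := balance(context.Background(), staticReader{}, "owner")
	if err != nil {
		panic(err)
	}
	fmt.Println(b.Text('f', 8)) // 0.12345678
}
```

Fetching the precision per request keeps the conversion correct even if the contract's decimals change between calls; caching it would be a separate trade-off.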
diff --git a/pkg/services/accounting/sign.go b/pkg/services/accounting/sign.go index cd6ff0307..d8feb76bd 100644 --- a/pkg/services/accounting/sign.go +++ b/pkg/services/accounting/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" ) type signService struct { diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go index d132ae7db..61fb025b8 100644 --- a/pkg/services/apemanager/audit.go +++ b/pkg/services/apemanager/audit.go @@ -4,10 +4,10 @@ import ( "context" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" - ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" + ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" ) var _ Server = (*auditService)(nil) @@ -33,7 +33,7 @@ func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainReq return res, err } - audit.LogRequest(a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req, + audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req, audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), req.GetBody().GetTarget().GetName(), res.GetBody().GetChainID()), @@ -49,7 +49,7 @@ func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChain return res, err } - audit.LogRequest(a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req, + audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req, audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), req.GetBody().GetTarget().GetName(), nil), @@ -65,7 +65,7 @@ func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveCh return res, err } - audit.LogRequest(a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req, + audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req, audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), req.GetBody().GetTarget().GetName(), req.GetBody().GetChainID()), diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go index e64f9a8d1..1d485321c 100644 --- a/pkg/services/apemanager/errors/errors.go +++ b/pkg/services/apemanager/errors/errors.go @@ -9,3 +9,9 @@ func ErrAPEManagerAccessDenied(reason string) error { err.WriteReason(reason) return err } + +func ErrAPEManagerInvalidArgument(msg string) error { + err := new(apistatus.InvalidArgument) + err.SetMessage(msg) + return err +} diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index 25f43486a..fc08fe569 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -8,20 +8,21 @@ import ( "errors" "fmt" - apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape" - apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage" containercore 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + apeV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/ape" + apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/mr-tron/base58/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" "go.uber.org/zap" ) @@ -34,6 +35,8 @@ type cfg struct { type Service struct { cfg + waiter Waiter + cnrSrc containercore.Source contractStorage ape_contract.ProxyAdaptedContractStorage @@ -41,11 +44,17 @@ type Service struct { type Option func(*cfg) -func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service { +type Waiter interface { + WaitTxHalt(context.Context, uint32, util.Uint256) error +} + +func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service { s := &Service{ cnrSrc: cnrSrc, contractStorage: contractStorage, + + waiter: waiter, } for i := range opts { @@ -53,7 +62,7 @@ func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedC } if s.log == nil { - s.log = &logger.Logger{Logger: zap.NewNop()} + s.log = logger.NewLoggerWrapper(zap.NewNop()) } return s @@ -69,12 +78,12 @@ var _ Server = (*Service)(nil) // validateContainerTargetRequest validates request for the container target. // It checks if request actor is the owner of the container, otherwise it denies the request. 
-func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error { +func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error { var cidSDK cidSDK.ID if err := cidSDK.DecodeString(cid); err != nil { - return fmt.Errorf("invalid CID format: %w", err) + return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err)) } - isOwner, err := s.isActorContainerOwner(cidSDK, pubKey) + isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey) if err != nil { return fmt.Errorf("failed to check owner: %w", err) } @@ -84,7 +93,7 @@ func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.Public return nil } -func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { +func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -92,7 +101,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw()) if err != nil { - return nil, err + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error()) } if len(chain.ID) == 0 { const randomIDLength = 10 @@ -108,15 +117,19 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } - if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil { + txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain) + if err != nil { + return nil, err + } + if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { return nil, err } @@ -129,7 +142,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) return resp, nil } -func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { +func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -140,15 +153,19 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", 
targetType)) } - if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil { + txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()) + if err != nil { + return nil, err + } + if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { return nil, err } @@ -160,7 +177,7 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe return resp, nil } -func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { +func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -171,12 +188,12 @@ func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequ switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target) @@ -210,23 +227,23 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK } sig := vh.GetBodySignature() if sig == nil { - return nil, errEmptyBodySignature + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error()) } key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) if err != nil { - return nil, fmt.Errorf("invalid signature key: %w", err) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err)) } return key, nil } -func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { +func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - cnr, err := s.cnrSrc.Get(cid) + cnr, err := s.cnrSrc.Get(ctx, cid) if err != nil { return false, fmt.Errorf("get container error: %w", err) } diff --git a/pkg/services/apemanager/server.go b/pkg/services/apemanager/server.go index 90b2d92ae..e624177ac 100644 --- a/pkg/services/apemanager/server.go +++ b/pkg/services/apemanager/server.go @@ -3,7 +3,7 @@ package apemanager import ( "context" - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" + apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" ) type Server interface { diff --git a/pkg/services/apemanager/sign.go b/pkg/services/apemanager/sign.go index eda2a7342..a172624ff 100644 --- a/pkg/services/apemanager/sign.go +++ b/pkg/services/apemanager/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" ) type signService struct { diff --git 
a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go new file mode 100644 index 000000000..eb6263320 --- /dev/null +++ b/pkg/services/common/ape/checker.go @@ -0,0 +1,173 @@ +package ape + +import ( + "context" + "crypto/ecdsa" + "errors" + "fmt" + + aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router" + frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +var ( + errBearerExpired = errors.New("bearer token has expired") + errBearerInvalidSignature = errors.New("bearer token has invalid signature") + errBearerInvalidContainerID = errors.New("bearer token was created for another container") + errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner") + errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender") +) + +type CheckPrm struct { + // Request is an APE-request that is checked by policy engine. + Request aperequest.Request + + Namespace string + + Container cid.ID + + // An encoded container's owner user ID. + ContainerOwner user.ID + + // PublicKey is public key of the request sender. + PublicKey *keys.PublicKey + + // The request's bearer token. It is used in order to check APE overrides with the token. + BearerToken *bearer.Token +} + +// CheckCore provides methods to perform the common logic of APE check. +type CheckCore interface { + // CheckAPE performs the common policy-engine check logic on a prepared request. + CheckAPE(ctx context.Context, prm CheckPrm) error +} + +type checkerCoreImpl struct { + LocalOverrideStorage policyengine.LocalOverrideStorage + MorphChainStorage policyengine.MorphRuleChainStorageReader + FrostFSSubjectProvider frostfsidcore.SubjectProvider + State netmap.State +} + +func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, + frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State, +) CheckCore { + return &checkerCoreImpl{ + LocalOverrideStorage: localOverrideStorage, + MorphChainStorage: morphChainStorage, + FrostFSSubjectProvider: frostFSSubjectProvider, + State: state, + } +} + +// CheckAPE performs the common policy-engine check logic on a prepared request. 
+func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { + var cr policyengine.ChainRouter + if prm.BearerToken != nil { + var err error + if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil { + return fmt.Errorf("bearer validation error: %w", err) + } + if prm.BearerToken.Impersonate() { + cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) + } else { + override, isSet := prm.BearerToken.APEOverride() + if !isSet { + return errors.New("expected APE override within bearer token") + } + cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override) + if err != nil { + return fmt.Errorf("create chain router error: %w", err) + } + } + } else { + cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) + } + + groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey) + if err != nil { + return fmt.Errorf("failed to get group ids: %w", err) + } + + // Policy contract keeps group-related chains as namespace-group pairs. + for i := range groups { + groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i]) + } + + rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, prm.PublicKey.Address()), groups) + status, found, err := cr.IsAllowed(apechain.Ingress, rt, prm.Request) + if err != nil { + return err + } + if found && status == apechain.Allow { + return nil + } + return newChainRouterError(prm.Request.Operation(), status) +} + +// isValidBearer checks whether the bearer token was correctly signed by an +// authorized entity. This method might be defined on the whole ACL service +// because it requires fetching the current epoch to check the token lifetime. +func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error { + if token == nil { + return nil + } + + // First check token lifetime. Simplest verification. + if token.InvalidAt(st.CurrentEpoch()) { + return errBearerExpired + } + + // Then check if bearer token is signed correctly. + if !token.VerifySignature() { + return errBearerInvalidSignature + } + + // Check for ape overrides defined in the bearer token. + if apeOverride, isSet := token.APEOverride(); isSet { + switch apeOverride.Target.TargetType { + case ape.TargetTypeContainer: + var targetCnr cid.ID + err := targetCnr.DecodeString(apeOverride.Target.Name) + if err != nil { + return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) + } + if !cntID.Equals(targetCnr) { + return errBearerInvalidContainerID + } + default: + } + } + + // Then check if container owner signed this token. + if !bearer.ResolveIssuer(*token).Equals(ownerCnr) { + return errBearerNotSignedByOwner + } + + // Then check if request sender has rights to use this token. + var usrSender user.ID + user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) + + // Then check if the sender is valid. For an impersonated token, the effective sender is the token issuer's + public key, not the actual request sender.
+ if !token.Impersonate() { + if !token.AssertUser(usrSender) { + return errBearerInvalidOwner + } + } else { + if !bearer.ResolveIssuer(*token).Equals(usrSender) { + return errBearerInvalidOwner + } + } + + return nil +} diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go new file mode 100644 index 000000000..d3c381de7 --- /dev/null +++ b/pkg/services/common/ape/error.go @@ -0,0 +1,33 @@ +package ape + +import ( + "fmt" + + apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" +) + +// ChainRouterError is returned when chain router validation prevents +// the APE request from being processed (no rule found, access denied, etc.). +type ChainRouterError struct { + operation string + status apechain.Status +} + +func (e *ChainRouterError) Error() string { + return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status()) +} + +func (e *ChainRouterError) Operation() string { + return e.operation +} + +func (e *ChainRouterError) Status() apechain.Status { + return e.status +} + +func newChainRouterError(operation string, status apechain.Status) *ChainRouterError { + return &ChainRouterError{ + operation: operation, + status: status, + } +} diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index 3ea591c6a..3b5dab9aa 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -12,14 +12,14 @@ import ( "net" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -49,11 +49,11 @@ var ( ) type ir interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type containers interface { - Get(cid.ID) (*containercore.Container, error) + Get(context.Context, cid.ID) (*containercore.Container, error) } type apeChecker struct { @@ -78,15 +78,6 @@ func NewAPEServer(router policyengine.ChainRouter, reader containers, ir ir, nm } } -func (ac *apeChecker) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.AnnounceUsedSpace") - defer span.End() - - // this method is not used, so not checked - - return ac.next.AnnounceUsedSpace(ctx, req) -} - func (ac *apeChecker) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Delete") defer span.End() @@ -111,23 +102,11 @@ func (ac *apeChecker) Get(ctx context.Context, req *container.GetRequest) (*cont return ac.next.Get(ctx, req) } -func (ac *apeChecker) GetExtendedACL(ctx context.Context, req 
*container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.GetExtendedACL") - defer span.End() - - if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), - nativeschema.MethodGetContainerEACL); err != nil { - return nil, err - } - - return ac.next.GetExtendedACL(ctx, req) -} - func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -137,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, err } @@ -147,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co } } - namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID()) + namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) if err != nil { return nil, fmt.Errorf("could not get owner namespace: %w", err) } - if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil { + if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { return nil, err } @@ -164,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co reqProps, ) - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -196,11 +175,84 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co return nil, apeErr(nativeschema.MethodListContainers, s) } +func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error { + ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream") + defer span.End() + + role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + if err != nil { + return err + } + + reqProps := map[string]string{ + nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), + nativeschema.PropertyKeyActorRole: role, + } + + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) + if err != nil { + return err + } + if p, ok := peer.FromContext(ctx); ok { + if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { + reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() + } + } + + namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) + if err != nil { + return fmt.Errorf("could not get owner namespace: %w", err) + } + if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { + return err + } + + request := aperequest.NewRequest( + nativeschema.MethodListContainers, + aperequest.NewResource( + resourceName(namespace, ""), + make(map[string]string), + ), + reqProps, + ) + + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) + if err != nil { + return fmt.Errorf("failed to 
get group ids: %w", err) + } + + // Policy contract keeps group related chains as namespace-group pair. + for i := range groups { + groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) + } + + rt := policyengine.NewRequestTargetWithNamespace(namespace) + rt.User = &policyengine.Target{ + Type: policyengine.User, + Name: fmt.Sprintf("%s:%s", namespace, pk.Address()), + } + rt.Groups = make([]policyengine.Target, len(groups)) + for i := range groups { + rt.Groups[i] = policyengine.GroupTarget(groups[i]) + } + + s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request) + if err != nil { + return err + } + + if found && s == apechain.Allow { + return ac.next.ListStream(req, stream) + } + + return apeErr(nativeschema.MethodListContainers, s) +} + func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -210,7 +262,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, err } @@ -220,7 +272,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont } } - namespace, err := ac.namespaceByOwner(req.GetBody().GetContainer().GetOwnerID()) + namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID()) if err != nil { return nil, fmt.Errorf("get namespace error: %w", err) } @@ -228,16 +280,21 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont return nil, err } + cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer()) + if err != nil { + return nil, fmt.Errorf("get container properties: %w", err) + } + request := aperequest.NewRequest( nativeschema.MethodPutContainer, aperequest.NewResource( resourceName(namespace, ""), - make(map[string]string), + cnrProps, ), reqProps, ) - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -269,7 +326,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont return nil, apeErr(nativeschema.MethodPutContainer, s) } -func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { +func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { if vh == nil { return "", nil, errMissingVerificationHeader } @@ -292,7 +349,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.R } pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(pkBytes) + isIR, err := ac.isInnerRingKey(ctx, pkBytes) if err != nil { return "", nil, err } @@ -303,18 +360,6 @@ func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.R return 
nativeschema.PropertyValueContainerRoleOthers, pk, nil } -func (ac *apeChecker) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.SetExtendedACL") - defer span.End() - - if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetEACL().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), - nativeschema.MethodSetContainerEACL); err != nil { - return nil, err - } - - return ac.next.SetExtendedACL(ctx, req) -} - func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, containerID *refs.ContainerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, op string) error { if vh == nil { return errMissingVerificationHeader @@ -325,7 +370,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con return err } - cont, err := ac.reader.Get(id) + cont, err := ac.reader.Get(ctx, id) if err != nil { return err } @@ -341,7 +386,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con namespace = cntNamespace } - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -355,7 +400,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con op, aperequest.NewResource( resourceName(namespace, id.EncodeToString()), - ac.getContainerProps(cont), + getContainerProps(cont), ), reqProps, ) @@ -405,10 +450,26 @@ func resourceName(namespace string, container string) string { return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container) } -func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string { - return map[string]string{ +func getContainerProps(c *containercore.Container) map[string]string { + props := map[string]string{ nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(), } + for attrName, attrVal := range c.Value.Attributes() { + name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName) + props[name] = attrVal + } + return props +} + +func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) { + if cnrV2 == nil { + return nil, errors.New("container is not set") + } + c := cnrSDK.Container{} + if err := c.ReadFromV2(*cnrV2); err != nil { + return nil, err + } + return getContainerProps(&containercore.Container{Value: c}), nil } func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, @@ -418,7 +479,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe if err != nil { return nil, nil, err } - role, err := ac.getRole(actor, pk, cont, cnrID) + role, err := ac.getRole(ctx, actor, pk, cont, cnrID) if err != nil { return nil, nil, err } @@ -426,7 +487,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, nil, err } @@ -438,13 +499,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe return reqProps, pk, nil } -func (ac *apeChecker) getRole(actor *user.ID, pk 
*keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { +func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { if cont.Value.Owner().Equals(*actor) { return nativeschema.PropertyValueContainerRoleOwner, nil } pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(pkBytes) + isIR, err := ac.isInnerRingKey(ctx, pkBytes) if err != nil { return "", err } @@ -452,7 +513,7 @@ func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containe return nativeschema.PropertyValueContainerRoleIR, nil } - isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont) + isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont) if err != nil { return "", err } @@ -546,8 +607,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) { - innerRingKeys, err := ac.ir.InnerRingKeys() +func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) { + innerRingKeys, err := ac.ir.InnerRingKeys(ctx) if err != nil { return false, err } @@ -561,50 +622,47 @@ func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) { return false, nil } -func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { +func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { binCnrID := make([]byte, sha256.Size) cnrID.Encode(binCnrID) - nm, err := netmap.GetLatestNetworkMap(ac.nm) + nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm) if err != nil { return false, err } - in, err := isContainerNode(nm, pk, binCnrID, cont) - if err != nil { - return false, err - } else if in { + if isContainerNode(nm, pk, binCnrID, cont) { return true, nil } // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = netmap.GetPreviousNetworkMap(ac.nm) + nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm) if err != nil { return false, err } - return isContainerNode(nm, pk, binCnrID, cont) + return isContainerNode(nm, pk, binCnrID, cont), nil } -func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) (bool, error) { - cnrVectors, err := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID) - if err != nil { - return false, err - } +func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool { + // It could be an error only if the network map doesn't have enough nodes to + // fulfil the policy.
It's a logical error that doesn't affect actor role + // determination, so we ignore it. + cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID) for i := range cnrVectors { for j := range cnrVectors[i] { if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) { - return true, nil + return true } } } - return false, nil + return false } -func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { +func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) { var ownerSDK user.ID if owner == nil { return "", errOwnerIDIsNotSet @@ -612,23 +670,34 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { if err := ownerSDK.ReadFromV2(*owner); err != nil { return "", err } - addr, err := ownerSDK.ScriptHash() - if err != nil { - return "", err - } + addr := ownerSDK.ScriptHash() namespace := "" - subject, err := ac.frostFSIDClient.GetSubject(addr) + subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err == nil { namespace = subject.Namespace - } else { - if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return "", fmt.Errorf("get subject error: %w", err) - } + } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { + return "", fmt.Errorf("get subject error: %w", err) } return namespace, nil } +func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) { + var ownerSDK user.ID + if owner == nil { + return "", errOwnerIDIsNotSet + } + if err := ownerSDK.ReadFromV2(*owner); err != nil { + return "", err + } + addr := ownerSDK.ScriptHash() + subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) + if err != nil { + return "", fmt.Errorf("get subject error: %w", err) + } + return subject.Namespace, nil +} + // validateNamespace validates a namespace set in a container. // If frostfs-id contract stores a namespace N1 for an owner ID and a container within a request // is set with namespace N2 (via Zone() property), then N2 is invalid and the request is denied. @@ -656,12 +725,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro // validateNamespace validates if a namespace of a request actor equals to owner's namespace. // An actor's namespace is calculated by a public key. -func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error { +func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - actorNamespace, err := ac.namespaceByOwner(actorOwnerID) + actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID) if err != nil { return fmt.Errorf("could not get actor namespace: %w", err) } @@ -672,11 +741,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNa } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
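+// For example (schematic, since the exact property key format is defined by +// aperequest.FormFrostfsIDRequestProperties): a frostfsid subject carrying the +// KV tag "tag-attr1" = "value1" ends up as a request property that a rule with +// chain.Condition{Kind: chain.KindRequest, Op: chain.CondStringEquals, ...} can +// match; this is what the user-claim-tag deny tests in ape_test.go exercise.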
-func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { +func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go index a6f0fb222..6438c34ca 100644 --- a/pkg/services/container/ape_test.go +++ b/pkg/services/container/ape_test.go @@ -9,14 +9,13 @@ import ( "net" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -50,13 +49,13 @@ func TestAPE(t *testing.T) { t.Run("deny get container by user claim tag", testDenyGetContainerByUserClaimTag) t.Run("deny get container by IP", testDenyGetContainerByIP) t.Run("deny get container by group id", testDenyGetContainerByGroupID) - t.Run("deny set container eACL for IR", testDenySetContainerEACLForIR) - t.Run("deny get container eACL for IR with session token", testDenyGetContainerEACLForIRSessionToken) t.Run("deny put container for others with session token", testDenyPutContainerForOthersSessionToken) t.Run("deny put container, read namespace from frostfsID", testDenyPutContainerReadNamespaceFromFrostfsID) t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace) t.Run("deny list containers for owner with PK", testDenyListContainersForPK) t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError) + t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr) + t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr) } const ( @@ -567,6 +566,185 @@ func testDenyGetContainerByIP(t *testing.T) { require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) } +func testDenyGetContainerSysZoneAttr(t *testing.T) { + t.Parallel() + srv := &srvStub{ + calls: map[string]int{}, + } + router := inmemory.NewInMemory() + contRdr := &containerStub{ + c: map[cid.ID]*containercore.Container{}, + } + ir := &irStub{ + keys: [][]byte{}, + } + nm := &netmapStub{} + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + + frostfsIDSubjectReader := &frostfsidStub{ + subjects: map[util.Uint160]*client.Subject{ + pk.PublicKey().GetScriptHash(): { + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + 
}, + }, + subjectsExt: map[util.Uint160]*client.SubjectExtended{ + pk.PublicKey().GetScriptHash(): { + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + Groups: []*client.Group{ + { + ID: 19888, + }, + }, + }, + }, + } + + apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) + + contID := cidtest.ID() + testContainer := containertest.Container() + pp := netmap.PlacementPolicy{} + require.NoError(t, pp.DecodeString("REP 1")) + testContainer.SetPlacementPolicy(pp) + testContainer.SetAttribute(container.SysAttributeZone, "eggplant") + contRdr.c[contID] = &containercore.Container{Value: testContainer} + + nm.currentEpoch = 100 + nm.netmaps = map[uint64]*netmap.NetMap{} + var testNetmap netmap.NetMap + testNetmap.SetEpoch(nm.currentEpoch) + testNetmap.SetNodes([]netmap.NodeInfo{{}}) + nm.netmaps[nm.currentEpoch] = &testNetmap + nm.netmaps[nm.currentEpoch-1] = &testNetmap + + _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.AccessDenied, + Actions: chain.Actions{ + Names: []string{ + nativeschema.MethodGetContainer, + }, + }, + Resources: chain.Resources{ + Names: []string{ + fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), + }, + }, + Condition: []chain.Condition{ + { + Kind: chain.KindResource, + Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), + Value: "eggplant", + Op: chain.CondStringEquals, + }, + }, + }, + }, + }) + require.NoError(t, err) + + req := &container.GetRequest{} + req.SetBody(&container.GetRequestBody{}) + var refContID refs.ContainerID + contID.WriteToV2(&refContID) + req.GetBody().SetContainerID(&refContID) + + require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) + + resp, err := apeSrv.Get(ctxWithPeerInfo(), req) + require.Nil(t, resp) + var errAccessDenied *apistatus.ObjectAccessDenied + require.ErrorAs(t, err, &errAccessDenied) + require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) +} + +func testDenyPutContainerSysZoneAttr(t *testing.T) { + t.Parallel() + srv := &srvStub{ + calls: map[string]int{}, + } + router := inmemory.NewInMemory() + contRdr := &containerStub{ + c: map[cid.ID]*containercore.Container{}, + } + ir := &irStub{ + keys: [][]byte{}, + } + nm := &netmapStub{} + + contID := cidtest.ID() + testContainer := containertest.Container() + pp := netmap.PlacementPolicy{} + require.NoError(t, pp.DecodeString("REP 1")) + testContainer.SetPlacementPolicy(pp) + testContainer.SetAttribute(container.SysAttributeZone, "eggplant") + contRdr.c[contID] = &containercore.Container{Value: testContainer} + owner := testContainer.Owner() + ownerAddr := owner.ScriptHash() + + frostfsIDSubjectReader := &frostfsidStub{ + subjects: map[util.Uint160]*client.Subject{ + ownerAddr: {}, + }, + subjectsExt: map[util.Uint160]*client.SubjectExtended{ + ownerAddr: {}, + }, + } + + apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) + + nm.currentEpoch = 100 + nm.netmaps = map[uint64]*netmap.NetMap{} + var testNetmap netmap.NetMap + testNetmap.SetEpoch(nm.currentEpoch) + testNetmap.SetNodes([]netmap.NodeInfo{{}}) + nm.netmaps[nm.currentEpoch] = &testNetmap + nm.netmaps[nm.currentEpoch-1] = &testNetmap + + _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: 
chain.AccessDenied, + Actions: chain.Actions{ + Names: []string{ + nativeschema.MethodPutContainer, + }, + }, + Resources: chain.Resources{ + Names: []string{ + nativeschema.ResourceFormatRootContainers, + }, + }, + Condition: []chain.Condition{ + { + Kind: chain.KindResource, + Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), + Value: "eggplant", + Op: chain.CondStringEquals, + }, + }, + }, + }, + }) + require.NoError(t, err) + + req := initPutRequest(t, testContainer) + + resp, err := apeSrv.Put(ctxWithPeerInfo(), req) + require.Nil(t, resp) + var errAccessDenied *apistatus.ObjectAccessDenied + require.ErrorAs(t, err, &errAccessDenied) + require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) +} + func testDenyGetContainerByGroupID(t *testing.T) { t.Parallel() srv := &srvStub{ @@ -665,173 +843,6 @@ func testDenyGetContainerByGroupID(t *testing.T) { require.ErrorAs(t, err, &errAccessDenied) } -func testDenySetContainerEACLForIR(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodSetContainerEACL, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleIR, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.SetExtendedACLRequest{} - req.SetBody(&container.SetExtendedACLRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetEACL(&acl.Table{}) - req.GetBody().GetEACL().SetContainerID(&refContID) - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - ir.keys = append(ir.keys, pk.PublicKey().Bytes()) - - resp, err := apeSrv.SetExtendedACL(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - -func testDenyGetContainerEACLForIRSessionToken(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: 
[][]byte{}, - } - nm := &netmapStub{} - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainerEACL, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleIR, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.GetExtendedACLRequest{} - req.SetBody(&container.GetExtendedACLRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - sessionPK, err := keys.NewPrivateKey() - require.NoError(t, err) - sToken := sessiontest.ContainerSigned() - sToken.ApplyOnlyTo(contID) - require.NoError(t, sToken.Sign(sessionPK.PrivateKey)) - var sTokenV2 session.Token - sToken.WriteToV2(&sTokenV2) - metaHeader := new(session.RequestMetaHeader) - metaHeader.SetSessionToken(&sTokenV2) - req.SetMetaHeader(metaHeader) - - ir.keys = append(ir.keys, sessionPK.PublicKey().Bytes()) - - resp, err := apeSrv.GetExtendedACL(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - func testDenyPutContainerForOthersSessionToken(t *testing.T) { t.Parallel() srv := &srvStub{ @@ -845,12 +856,16 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) { keys: [][]byte{}, } nm := &netmapStub{} - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) testContainer := containertest.Container() + owner := testContainer.Owner() + ownerAddr := owner.ScriptHash() + frostfsIDSubjectReader := &frostfsidStub{ + subjects: map[util.Uint160]*client.Subject{ + ownerAddr: {}, + }, + } + apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) nm.currentEpoch = 100 nm.netmaps = map[uint64]*netmap.NetMap{} @@ -938,7 +953,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(t, testContainer) + ownerScriptHash := initOwnerIDScriptHash(testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -1022,7 +1037,7 @@ func 
testDenyPutContainerInvalidNamespace(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(t, testContainer) + ownerScriptHash := initOwnerIDScriptHash(testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -1229,11 +1244,6 @@ type srvStub struct { calls map[string]int } -func (s *srvStub) AnnounceUsedSpace(context.Context, *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) { - s.calls["AnnounceUsedSpace"]++ - return &container.AnnounceUsedSpaceResponse{}, nil -} - func (s *srvStub) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) { s.calls["Delete"]++ return &container.DeleteResponse{}, nil @@ -1244,31 +1254,26 @@ func (s *srvStub) Get(context.Context, *container.GetRequest) (*container.GetRes return &container.GetResponse{}, nil } -func (s *srvStub) GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) { - s.calls["GetExtendedACL"]++ - return &container.GetExtendedACLResponse{}, nil -} - func (s *srvStub) List(context.Context, *container.ListRequest) (*container.ListResponse, error) { s.calls["List"]++ return &container.ListResponse{}, nil } +func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error { + s.calls["ListStream"]++ + return nil +} + func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) { s.calls["Put"]++ return &container.PutResponse{}, nil } -func (s *srvStub) SetExtendedACL(context.Context, *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) { - s.calls["SetExtendedACL"]++ - return &container.SetExtendedACLResponse{}, nil -} - type irStub struct { keys [][]byte } -func (s *irStub) InnerRingKeys() ([][]byte, error) { +func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) { return s.keys, nil } @@ -1276,7 +1281,7 @@ type containerStub struct { c map[cid.ID]*containercore.Container } -func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) { +func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) { if v, ok := s.c[id]; ok { return v, nil } @@ -1288,21 +1293,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid diff") } - return s.GetNetMapByEpoch(s.currentEpoch - diff) + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) } -func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch() (uint64, error) { +func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { return s.currentEpoch, nil } @@ -1311,7 +1316,7 @@ type frostfsidStub struct { subjectsExt map[util.Uint160]*client.SubjectExtended } -func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) { +func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) { s, ok := f.subjects[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -1319,7 +1324,7 @@ func (f 
*frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) return s, nil } -func (f *frostfsidStub) GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) { s, ok := f.subjectsExt[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -1707,26 +1712,21 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put return req } -func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 { +func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 { var ownerSDK *user.ID owner := testContainer.Owner() ownerSDK = &owner - sc, err := ownerSDK.ScriptHash() - require.NoError(t, err) - return sc + return ownerSDK.ScriptHash() } func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) { var actorUserID user.ID user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey())) - var err error - actorScriptHash, err = actorUserID.ScriptHash() - require.NoError(t, err) + actorScriptHash = actorUserID.ScriptHash() var ownerUserID user.ID user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey())) - ownerScriptHash, err = ownerUserID.ScriptHash() - require.NoError(t, err) + ownerScriptHash = ownerUserID.ScriptHash() require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String()) return } diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go index 7ef432bb1..b235efa3c 100644 --- a/pkg/services/container/audit.go +++ b/pkg/services/container/audit.go @@ -4,11 +4,10 @@ import ( "context" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - container_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + container_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) @@ -29,24 +28,6 @@ func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Serv } } -// AnnounceUsedSpace implements Server. -func (a *auditService) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) { - res, err := a.next.AnnounceUsedSpace(ctx, req) - if !a.enabled.Load() { - return res, err - } - - var ids []*refs.ContainerID - for _, v := range req.GetBody().GetAnnouncements() { - ids = append(ids, v.GetContainerID()) - } - - audit.LogRequest(a.log, container_grpc.ContainerService_AnnounceUsedSpace_FullMethodName, req, - audit.TargetFromRefs(ids, &cid.ID{}), err == nil) - - return res, err -} - // Delete implements Server. 
func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { res, err := a.next.Delete(ctx, req) @@ -54,7 +35,7 @@ func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest) return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_Delete_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) return res, err @@ -66,18 +47,7 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_Get_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) - return res, err -} - -// GetExtendedACL implements Server. -func (a *auditService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) { - res, err := a.next.GetExtendedACL(ctx, req) - if !a.enabled.Load() { - return res, err - } - audit.LogRequest(a.log, container_grpc.ContainerService_GetExtendedACL_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) return res, err } @@ -88,29 +58,29 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_List_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil) return res, err } +// ListStream implements Server. +func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error { + err := a.next.ListStream(req, stream) + if !a.enabled.Load() { + return err + } + audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req, + audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil) + return err +} + // Put implements Server. func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { res, err := a.next.Put(ctx, req) if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_Put_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req, audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil) return res, err } - -// SetExtendedACL implements Server. 
-func (a *auditService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) { - res, err := a.next.SetExtendedACL(ctx, req) - if !a.enabled.Load() { - return res, err - } - audit.LogRequest(a.log, container_grpc.ContainerService_SetExtendedACL_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetEACL().GetContainerID(), &cid.ID{}), err == nil) - return res, err -} diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go index d4ae11d62..cdd0d2514 100644 --- a/pkg/services/container/executor.go +++ b/pkg/services/container/executor.go @@ -4,9 +4,9 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) type ServiceExecutor interface { @@ -14,8 +14,7 @@ type ServiceExecutor interface { Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error) Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error) List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error) - SetExtendedACL(context.Context, *session.Token, *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error) - GetExtendedACL(context.Context, *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) + ListStream(context.Context, *container.ListStreamRequest, ListStream) error } type executorSvc struct { @@ -96,33 +95,10 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co return resp, nil } -func (s *executorSvc) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) { - meta := req.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - respBody, err := s.exec.SetExtendedACL(ctx, meta.GetSessionToken(), req.GetBody()) +func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error { + err := s.exec.ListStream(stream.Context(), req, stream) if err != nil { - return nil, fmt.Errorf("could not execute SetEACL request: %w", err) + return fmt.Errorf("could not execute ListStream request: %w", err) } - - resp := new(container.SetExtendedACLResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil -} - -func (s *executorSvc) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) { - respBody, err := s.exec.GetExtendedACL(ctx, req.GetBody()) - if err != nil { - return nil, fmt.Errorf("could not execute GetEACL request: %w", err) - } - - resp := new(container.GetExtendedACLResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil + return nil } diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index e2e79f3d2..eaa608eba 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -5,16 +5,14 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" containercore 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) var errMissingUserID = errors.New("missing user ID") @@ -27,20 +25,20 @@ type morphExecutor struct { // Reader is an interface of read-only container storage. type Reader interface { containercore.Source - containercore.EACLSource // ContainersOf returns a list of container identifiers belonging // to the specified user of FrostFS system. Returns the identifiers // of all FrostFS containers if pointer to owner identifier is nil. - ContainersOf(*user.ID) ([]cid.ID, error) + ContainersOf(context.Context, *user.ID) ([]cid.ID, error) + IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error } // Writer is an interface of container storage updater. type Writer interface { // Put stores specified container in the side chain. - Put(containercore.Container) (*cid.ID, error) + Put(context.Context, containercore.Container) (*cid.ID, error) // Delete removes specified container from the side chain. - Delete(containercore.RemovalWitness) error + Delete(context.Context, containercore.RemovalWitness) error } func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor { @@ -50,7 +48,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor { } } -func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) { +func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) { sigV2 := body.GetSignature() if sigV2 == nil { // TODO(@cthulhu-rider): #468 use "const" error @@ -83,7 +81,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con } } - idCnr, err := s.wrt.Put(cnr) + idCnr, err := s.wrt.Put(ctx, cnr) if err != nil { return nil, err } @@ -97,7 +95,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con return res, nil } -func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) { +func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) { idV2 := body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") @@ -127,7 +125,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body * rmWitness.Signature = body.GetSignature() rmWitness.SessionToken = tok - err = s.wrt.Delete(rmWitness) + err = s.wrt.Delete(ctx, rmWitness) if err != nil { return nil, err } @@ -135,7 +133,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body * return new(container.DeleteResponseBody), nil } -func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { +func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { idV2 := 
body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") @@ -148,7 +146,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) ( return nil, fmt.Errorf("invalid container ID: %w", err) } - cnr, err := s.rdr.Get(id) + cnr, err := s.rdr.Get(ctx, id) if err != nil { return nil, err } @@ -175,7 +173,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) ( return res, nil } -func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { +func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errMissingUserID @@ -188,7 +186,7 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) return nil, fmt.Errorf("invalid user ID: %w", err) } - cnrs, err := s.rdr.ContainersOf(&id) + cnrs, err := s.rdr.ContainersOf(ctx, &id) if err != nil { return nil, err } @@ -204,43 +202,55 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) return res, nil } -func (s *morphExecutor) SetExtendedACL(_ context.Context, _ *sessionV2.Token, _ *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetExtendedACL not implemented") -} - -func (s *morphExecutor) GetExtendedACL(_ context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) { - idV2 := body.GetContainerID() +func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error { + body := req.GetBody() + idV2 := body.GetOwnerID() if idV2 == nil { - return nil, errors.New("missing container ID") + return errMissingUserID } - var id cid.ID + var id user.ID err := id.ReadFromV2(*idV2) if err != nil { - return nil, fmt.Errorf("invalid container ID: %w", err) + return fmt.Errorf("invalid user ID: %w", err) } - eaclInfo, err := s.rdr.GetEACL(id) - if err != nil { - return nil, err + resBody := new(container.ListStreamResponseBody) + r := new(container.ListStreamResponse) + r.SetBody(resBody) + + var cidList []refs.ContainerID + + // Number of containers to send at once.
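+ // Identifiers are buffered and flushed to the stream in fixed-size batches, so + // memory stays bounded no matter how many containers the owner has; for example, + // 2500 containers yield two full batches of 1000 followed by a final partial + // batch of 500 once the iteration completes.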
+ const batchSize = 1000 + + processCID := func(id cid.ID) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var refID refs.ContainerID + id.WriteToV2(&refID) + cidList = append(cidList, refID) + if len(cidList) == batchSize { + r.GetBody().SetContainerIDs(cidList) + cidList = cidList[:0] + return stream.Send(r) + } + return nil } - var sigV2 refs.Signature - eaclInfo.Signature.WriteToV2(&sigV2) - - var tokV2 *sessionV2.Token - - if eaclInfo.Session != nil { - tokV2 = new(sessionV2.Token) - - eaclInfo.Session.WriteToV2(tokV2) + if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil { + return err } - res := new(container.GetExtendedACLResponseBody) - res.SetEACL(eaclInfo.Value.ToV2()) - res.SetSignature(&sigV2) - res.SetSessionToken(tokV2) + if len(cidList) > 0 { + r.GetBody().SetContainerIDs(cidList) + return stream.Send(r) + } - return res, nil + return nil } diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go index 560c69232..1f6fdb0be 100644 --- a/pkg/services/container/morph/executor_test.go +++ b/pkg/services/container/morph/executor_test.go @@ -4,12 +4,12 @@ import ( "context" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" containerSvcMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test" @@ -24,15 +24,11 @@ type mock struct { containerSvcMorph.Reader } -func (m mock) Put(_ containerCore.Container) (*cid.ID, error) { +func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) { return new(cid.ID), nil } -func (m mock) Delete(_ containerCore.RemovalWitness) error { - return nil -} - -func (m mock) PutEACL(_ containerCore.EACL) error { +func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error { return nil } diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go index 052a8c945..d9208077d 100644 --- a/pkg/services/container/server.go +++ b/pkg/services/container/server.go @@ -3,7 +3,8 @@ package container import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" ) // Server is an interface of the FrostFS API Container service server. 
@@ -12,7 +13,11 @@ type Server interface { Get(context.Context, *container.GetRequest) (*container.GetResponse, error) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) List(context.Context, *container.ListRequest) (*container.ListResponse, error) - SetExtendedACL(context.Context, *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) - GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) - AnnounceUsedSpace(context.Context, *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) + ListStream(*container.ListStreamRequest, ListStream) error +} + +// ListStream is an interface of FrostFS API v2 compatible search streamer. +type ListStream interface { + util.ServerStream + Send(*container.ListStreamResponse) error } diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go index bba717f60..85fe7ae87 100644 --- a/pkg/services/container/sign.go +++ b/pkg/services/container/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" ) type signService struct { @@ -57,29 +57,39 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co return resp, s.sigSvc.SignResponse(resp, err) } -func (s *signService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) { +func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error { if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.SetExtendedACLResponse) - return resp, s.sigSvc.SignResponse(resp, err) + resp := new(container.ListStreamResponse) + _ = s.sigSvc.SignResponse(resp, err) + return stream.Send(resp) } - resp, err := util.EnsureNonNilResponse(s.svc.SetExtendedACL(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) + + ss := &listStreamSigner{ + ListStream: stream, + sigSvc: s.sigSvc, + } + err := s.svc.ListStream(req, ss) + if err != nil || !ss.nonEmptyResp { + return ss.send(new(container.ListStreamResponse), err) + } + return nil } -func (s *signService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.GetExtendedACLResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.GetExtendedACL(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) +type listStreamSigner struct { + ListStream + sigSvc *util.SignService + + nonEmptyResp bool // set on first Send call } -func (s *signService) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.AnnounceUsedSpaceResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.AnnounceUsedSpace(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) +func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error { + s.nonEmptyResp = true + return s.send(resp, nil) +} + +func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error { + if err := s.sigSvc.SignResponse(resp, err); err != nil 
{ + return err + } + return s.ListStream.Send(resp) } diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go new file mode 100644 index 000000000..4f8708da7 --- /dev/null +++ b/pkg/services/container/transport_splitter.go @@ -0,0 +1,92 @@ +package container + +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" +) + +type ( + TransportSplitter struct { + next Server + + respSvc *response.Service + cnrAmount uint32 + } + + listStreamMsgSizeCtrl struct { + util.ServerStream + stream ListStream + respSvc *response.Service + cnrAmount uint32 + } +) + +func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server { + return &TransportSplitter{ + next: next, + respSvc: respSvc, + cnrAmount: cnrAmount, + } +} + +func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { + return s.next.Put(ctx, req) +} + +func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { + return s.next.Delete(ctx, req) +} + +func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { + return s.next.Get(ctx, req) +} + +func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { + return s.next.List(ctx, req) +} + +func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error { + return s.next.ListStream(req, &listStreamMsgSizeCtrl{ + ServerStream: stream, + stream: stream, + respSvc: s.respSvc, + cnrAmount: s.cnrAmount, + }) +} + +func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error { + s.respSvc.SetMeta(resp) + body := resp.GetBody() + ids := body.GetContainerIDs() + + var newResp *container.ListStreamResponse + + for { + if newResp == nil { + newResp = new(container.ListStreamResponse) + newResp.SetBody(body) + } + + cut := min(s.cnrAmount, uint32(len(ids))) + + body.SetContainerIDs(ids[:cut]) + newResp.SetMetaHeader(resp.GetMetaHeader()) + newResp.SetVerificationHeader(resp.GetVerificationHeader()) + + if err := s.stream.Send(newResp); err != nil { + return fmt.Errorf("TransportSplitter: %w", err) + } + + ids = ids[cut:] + + if len(ids) == 0 { + break + } + } + + return nil +} diff --git a/pkg/services/control/common_test.go b/pkg/services/control/common_test.go deleted file mode 100644 index bc512b4be..000000000 --- a/pkg/services/control/common_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package control_test - -import ( - "crypto/rand" - "testing" - - "github.com/mr-tron/base58" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" -) - -type protoMessage interface { - StableMarshal([]byte) []byte - proto.Message -} - -func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) { - require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2)) - - require.True(t, cmp(m1, m2)) -} - -func testData(sz int) []byte { - d := make([]byte, sz) - - _, _ = rand.Read(d) - - return d -} - -func testString() string { - return base58.Encode(testData(10)) -} diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go index fd6f020d1..37daf67be 100644 --- a/pkg/services/control/convert.go +++ 
b/pkg/services/control/convert.go @@ -1,8 +1,8 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message" ) type requestWrapper struct { diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go index c892c5b6c..024676b87 100644 --- a/pkg/services/control/ir/convert.go +++ b/pkg/services/control/ir/convert.go @@ -1,8 +1,8 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message" ) type requestWrapper struct { diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go index 0c9400f6c..62f800d99 100644 --- a/pkg/services/control/ir/rpc.go +++ b/pkg/services/control/ir/rpc.go @@ -1,9 +1,9 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" ) const serviceName = "ircontrol.ControlService" diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go index 9f7a8b879..d9f65a2fc 100644 --- a/pkg/services/control/ir/server/audit.go +++ b/pkg/services/control/ir/server/audit.go @@ -6,10 +6,10 @@ import ( "strings" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) @@ -36,7 +36,7 @@ func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheck if !a.enabled.Load() { return res, err } - audit.LogRequestWithKey(a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil) + audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil) return res, err } @@ -79,7 +79,7 @@ func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveC } } - audit.LogRequestWithKey(a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil) + audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil) return res, err } @@ -90,7 +90,7 @@ func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRe return res, err } - audit.LogRequestWithKey(a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(), + audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(), audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil) return res, err } @@ -102,7 +102,7 @@ func (a *auditService) TickEpoch(ctx 
context.Context, req *control.TickEpochRequ return res, err } - audit.LogRequestWithKey(a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(), + audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(), nil, err == nil) return res, err } diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go index 2447a8a74..0509d2646 100644 --- a/pkg/services/control/ir/server/calls.go +++ b/pkg/services/control/ir/server/calls.go @@ -5,10 +5,10 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "google.golang.org/grpc/codes" @@ -40,7 +40,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) // TickEpoch forces a new epoch. // // If request is not signed with a key from white list, permission error returns. -func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) { +func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } @@ -48,12 +48,12 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c resp := new(control.TickEpochResponse) resp.SetBody(new(control.TickEpochResponse_Body)) - epoch, err := s.netmapClient.Epoch() + epoch, err := s.netmapClient.Epoch(ctx) if err != nil { return nil, fmt.Errorf("getting current epoch: %w", err) } - vub, err := s.netmapClient.NewEpochControl(epoch+1, req.GetBody().GetVub()) + vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub()) if err != nil { return nil, fmt.Errorf("forcing new epoch: %w", err) } @@ -69,7 +69,7 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c // RemoveNode forces a node removal. // // If request is not signed with a key from white list, permission error returns. 
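Stepping back from the context-threading hunks here: the `listStreamSigner` added to sign.go above is the core of the ListStream change. A server-streaming RPC has no single response to return, so a failed `VerifyRequest` is reported by pushing one signed error response onto the stream, and the `nonEmptyResp` flag guarantees the client receives at least one signed message even when the wrapped service emits nothing. The sketch below models that contract with simplified stand-in types; `Response`, `Stream`, and the fake signing step are illustrative, not the repo's actual `util.SignService` API:

```go
package main

import "fmt"

// Response is a simplified stand-in for container.ListStreamResponse.
type Response struct {
	IDs    []string
	Err    error
	Signed bool
}

// Stream is a stand-in for the ListStream server stream.
type Stream interface {
	Send(*Response) error
}

// signingStream signs every response on its way out of the server,
// mirroring listStreamSigner in sign.go.
type signingStream struct {
	Stream
	sent bool // like nonEmptyResp: set on first Send
}

func (s *signingStream) Send(r *Response) error {
	s.sent = true
	return s.send(r, nil)
}

func (s *signingStream) send(r *Response, err error) error {
	r.Err = err
	r.Signed = true // a real implementation signs with the node key here
	return s.Stream.Send(r)
}

// listStream wires a handler to a stream the way signService.ListStream
// does: on handler error or an empty stream, emit one signed response.
func listStream(handler func(Stream) error, stream Stream) error {
	ss := &signingStream{Stream: stream}
	if err := handler(ss); err != nil || !ss.sent {
		return ss.send(new(Response), err)
	}
	return nil
}

type printStream struct{}

func (printStream) Send(r *Response) error {
	fmt.Printf("signed=%v err=%v ids=%v\n", r.Signed, r.Err, r.IDs)
	return nil
}

func main() {
	// A handler that sends nothing still yields one signed response.
	_ = listStream(func(Stream) error { return nil }, printStream{})
	// A handler that streams two pages: each page is signed on the way out.
	_ = listStream(func(s Stream) error {
		if err := s.Send(&Response{IDs: []string{"cnr-1"}}); err != nil {
			return err
		}
		return s.Send(&Response{IDs: []string{"cnr-2"}})
	}, printStream{})
}
```

Without the `sent` flag, the client of an empty listing would get no messages at all and could not distinguish a successful empty result from a dropped connection.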
-func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) { +func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } @@ -77,7 +77,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) ( resp := new(control.RemoveNodeResponse) resp.SetBody(new(control.RemoveNodeResponse_Body)) - nm, err := s.netmapClient.NetMap() + nm, err := s.netmapClient.NetMap(ctx) if err != nil { return nil, fmt.Errorf("getting netmap: %w", err) } @@ -91,11 +91,11 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) ( if len(nodeInfo.PublicKey()) == 0 { return nil, status.Error(codes.NotFound, "no such node") } - if nodeInfo.IsOffline() { + if nodeInfo.Status().IsOffline() { return nil, status.Error(codes.FailedPrecondition, "node is already offline") } - vub, err := s.netmapClient.ForceRemovePeer(nodeInfo, req.GetBody().GetVub()) + vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub()) if err != nil { return nil, fmt.Errorf("forcing node removal: %w", err) } @@ -109,7 +109,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) ( } // RemoveContainer forces a container removal. -func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) { +func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } @@ -124,7 +124,7 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error()) } var err error - vub, err = s.removeContainer(containerID, req.GetBody().GetVub()) + vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub()) if err != nil { return nil, err } @@ -138,13 +138,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error()) } - cids, err := s.containerClient.ContainersOf(&owner) + cids, err := s.containerClient.ContainersOf(ctx, &owner) if err != nil { return nil, fmt.Errorf("failed to get owner's containers: %w", err) } for _, containerID := range cids { - vub, err = s.removeContainer(containerID, req.GetBody().GetVub()) + vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub()) if err != nil { return nil, err } @@ -162,13 +162,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer return resp, nil } -func (s *Server) removeContainer(containerID cid.ID, vub uint32) (uint32, error) { +func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) { var prm container.DeletePrm prm.SetCID(containerID[:]) prm.SetControlTX(true) prm.SetVUB(vub) - vub, err := s.containerClient.Delete(prm) + vub, err := s.containerClient.Delete(ctx, prm) if err != nil { return 0, fmt.Errorf("forcing container removal: %w", err) } diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go index 0c2de5300..9d5cfefc8 100644 --- a/pkg/services/control/ir/server/deps.go +++ 
b/pkg/services/control/ir/server/deps.go @@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ // HealthChecker is component interface for calculating // the current health status of a node. type HealthChecker interface { - // Must calculate and return current health status of the IR application. + // HealthStatus must calculate and return current health status of the IR application. // // If status can not be calculated for any reason, // control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned. diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go index c2a4f88a6..0cfca71c1 100644 --- a/pkg/services/control/ir/server/server.go +++ b/pkg/services/control/ir/server/server.go @@ -35,8 +35,7 @@ func panicOnPrmValue(n string, v any) { // the parameterized private key. func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server { // verify required parameters - switch { - case prm.healthChecker == nil: + if prm.healthChecker == nil { panicOnPrmValue("health checker", prm.healthChecker) } diff --git a/pkg/services/control/ir/server/sign.go b/pkg/services/control/ir/server/sign.go index f72d51f9e..d39f6d5f9 100644 --- a/pkg/services/control/ir/server/sign.go +++ b/pkg/services/control/ir/server/sign.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" ) diff --git a/pkg/services/control/ir/service.go b/pkg/services/control/ir/service.go deleted file mode 100644 index b2db2b43a..000000000 --- a/pkg/services/control/ir/service.go +++ /dev/null @@ -1,46 +0,0 @@ -package control - -// SetBody sets health check request body. -func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetHealthStatus sets health status of the IR application. -func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) { - if x != nil { - x.HealthStatus = v - } -} - -// SetBody sets health check response body. -func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) { - if x != nil { - x.Body = v - } -} - -func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) { - if x != nil { - x.Body = v - } -} - -func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) { - if x != nil { - x.Body = v - } -} - -func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) { - if x != nil { - x.Body = v - } -} - -func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) { - if x != nil { - x.Body = v - } -} diff --git a/pkg/services/control/ir/service.pb.go b/pkg/services/control/ir/service.pb.go deleted file mode 100644 index d1e253bf5..000000000 --- a/pkg/services/control/ir/service.pb.go +++ /dev/null @@ -1,1277 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.0 -// source: pkg/services/control/ir/service.proto - -package control - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
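Before wading through the rest of the deleted generated code below, one note on the new transport_splitter.go shown earlier: `listStreamMsgSizeCtrl.Send` re-slices a large `ListStreamResponse` into chunks of at most `cnrAmount` container IDs, re-attaching the meta and verification headers to each chunk, so no single gRPC message grows unbounded. A condensed model of that loop over plain slices; only the chunking logic is kept, and `sendChunked` with its callback are hypothetical names:

```go
package main

import "fmt"

// sendChunked forwards ids in messages of at most limit entries,
// mirroring listStreamMsgSizeCtrl.Send. limit is assumed positive,
// as cnrAmount is in the node config. Like the original, it always
// performs at least one send, even for an empty list.
func sendChunked(ids []string, limit int, send func([]string) error) error {
	for {
		cut := min(limit, len(ids)) // min is a builtin since Go 1.21
		if err := send(ids[:cut]); err != nil {
			return fmt.Errorf("TransportSplitter: %w", err)
		}
		ids = ids[cut:]
		if len(ids) == 0 {
			return nil
		}
	}
}

func main() {
	ids := []string{"a", "b", "c", "d", "e"}
	_ = sendChunked(ids, 2, func(chunk []string) error {
		fmt.Println(chunk) // prints [a b], then [c d], then [e]
		return nil
	})
}
```

Note the send happens before the emptiness check, so an empty listing still yields one empty message; this matches the do-while shape of the original loop.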
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Health check request. -type HealthCheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of health check request message. - Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - // Should be signed by node key or one of - // the keys configured by the node. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *HealthCheckRequest) Reset() { - *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckRequest) ProtoMessage() {} - -func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. -func (*HealthCheckRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0} -} - -func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *HealthCheckRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Health check response. -type HealthCheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of health check response message. - Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *HealthCheckResponse) Reset() { - *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckResponse) ProtoMessage() {} - -func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. 
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1} -} - -func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *HealthCheckResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type TickEpochRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *TickEpochRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *TickEpochRequest) Reset() { - *x = TickEpochRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TickEpochRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TickEpochRequest) ProtoMessage() {} - -func (x *TickEpochRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TickEpochRequest.ProtoReflect.Descriptor instead. -func (*TickEpochRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{2} -} - -func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *TickEpochRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type TickEpochResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *TickEpochResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *TickEpochResponse) Reset() { - *x = TickEpochResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TickEpochResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TickEpochResponse) ProtoMessage() {} - -func (x *TickEpochResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TickEpochResponse.ProtoReflect.Descriptor instead. 
-func (*TickEpochResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{3} -} - -func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *TickEpochResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type RemoveNodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *RemoveNodeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *RemoveNodeRequest) Reset() { - *x = RemoveNodeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveNodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveNodeRequest) ProtoMessage() {} - -func (x *RemoveNodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveNodeRequest.ProtoReflect.Descriptor instead. -func (*RemoveNodeRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{4} -} - -func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *RemoveNodeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type RemoveNodeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *RemoveNodeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *RemoveNodeResponse) Reset() { - *x = RemoveNodeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveNodeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveNodeResponse) ProtoMessage() {} - -func (x *RemoveNodeResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveNodeResponse.ProtoReflect.Descriptor instead. 
-func (*RemoveNodeResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{5} -} - -func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *RemoveNodeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type RemoveContainerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *RemoveContainerRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *RemoveContainerRequest) Reset() { - *x = RemoveContainerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveContainerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveContainerRequest) ProtoMessage() {} - -func (x *RemoveContainerRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveContainerRequest.ProtoReflect.Descriptor instead. -func (*RemoveContainerRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{6} -} - -func (x *RemoveContainerRequest) GetBody() *RemoveContainerRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *RemoveContainerRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type RemoveContainerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *RemoveContainerResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *RemoveContainerResponse) Reset() { - *x = RemoveContainerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveContainerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveContainerResponse) ProtoMessage() {} - -func (x *RemoveContainerResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveContainerResponse.ProtoReflect.Descriptor instead. 
-func (*RemoveContainerResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{7} -} - -func (x *RemoveContainerResponse) GetBody() *RemoveContainerResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *RemoveContainerResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Health check request body. -type HealthCheckRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *HealthCheckRequest_Body) Reset() { - *x = HealthCheckRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckRequest_Body) ProtoMessage() {} - -func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead. -func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0, 0} -} - -// Health check response body -type HealthCheckResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Health status of IR node application. - HealthStatus HealthStatus `protobuf:"varint,1,opt,name=health_status,json=healthStatus,proto3,enum=ircontrol.HealthStatus" json:"health_status,omitempty"` -} - -func (x *HealthCheckResponse_Body) Reset() { - *x = HealthCheckResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckResponse_Body) ProtoMessage() {} - -func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead. -func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1, 0} -} - -func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus { - if x != nil { - return x.HealthStatus - } - return HealthStatus_HEALTH_STATUS_UNDEFINED -} - -type TickEpochRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Valid until block value override. 
- Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"` -} - -func (x *TickEpochRequest_Body) Reset() { - *x = TickEpochRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TickEpochRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TickEpochRequest_Body) ProtoMessage() {} - -func (x *TickEpochRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TickEpochRequest_Body.ProtoReflect.Descriptor instead. -func (*TickEpochRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *TickEpochRequest_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} - -type TickEpochResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Valid until block value for transaction. - Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"` -} - -func (x *TickEpochResponse_Body) Reset() { - *x = TickEpochResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TickEpochResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TickEpochResponse_Body) ProtoMessage() {} - -func (x *TickEpochResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TickEpochResponse_Body.ProtoReflect.Descriptor instead. -func (*TickEpochResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *TickEpochResponse_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} - -type RemoveNodeRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Valid until block value override. 
- Vub uint32 `protobuf:"varint,2,opt,name=vub,proto3" json:"vub,omitempty"` -} - -func (x *RemoveNodeRequest_Body) Reset() { - *x = RemoveNodeRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveNodeRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveNodeRequest_Body) ProtoMessage() {} - -func (x *RemoveNodeRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveNodeRequest_Body.ProtoReflect.Descriptor instead. -func (*RemoveNodeRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *RemoveNodeRequest_Body) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *RemoveNodeRequest_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} - -type RemoveNodeResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Valid until block value for transaction. - Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"` -} - -func (x *RemoveNodeResponse_Body) Reset() { - *x = RemoveNodeResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveNodeResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveNodeResponse_Body) ProtoMessage() {} - -func (x *RemoveNodeResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveNodeResponse_Body.ProtoReflect.Descriptor instead. -func (*RemoveNodeResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *RemoveNodeResponse_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} - -type RemoveContainerRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Owner []byte `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` - // Valid until block value override. 
- Vub uint32 `protobuf:"varint,3,opt,name=vub,proto3" json:"vub,omitempty"` -} - -func (x *RemoveContainerRequest_Body) Reset() { - *x = RemoveContainerRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveContainerRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveContainerRequest_Body) ProtoMessage() {} - -func (x *RemoveContainerRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveContainerRequest_Body.ProtoReflect.Descriptor instead. -func (*RemoveContainerRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *RemoveContainerRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *RemoveContainerRequest_Body) GetOwner() []byte { - if x != nil { - return x.Owner - } - return nil -} - -func (x *RemoveContainerRequest_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} - -type RemoveContainerResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Valid until block value for transaction. - Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"` -} - -func (x *RemoveContainerResponse_Body) Reset() { - *x = RemoveContainerResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveContainerResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveContainerResponse_Body) ProtoMessage() {} - -func (x *RemoveContainerResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveContainerResponse_Body.ProtoReflect.Descriptor instead. 
-func (*RemoveContainerResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{7, 0} -} - -func (x *RemoveContainerResponse_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} - -var File_pkg_services_control_ir_service_proto protoreflect.FileDescriptor - -var file_pkg_services_control_ir_service_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x1a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, - 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, - 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, - 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, - 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, - 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x44, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, - 0x3c, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x96, 0x01, - 0x0a, 0x10, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, - 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, - 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18, 0x0a, 0x04, - 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0x98, 0x01, 0x0a, 0x11, 0x54, 0x69, 0x63, 0x6b, 0x45, - 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, - 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x72, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, - 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, - 0x62, 0x22, 0xaa, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, - 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x1a, 0x2a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x76, 0x75, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0x9a, - 0x01, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0xdb, 0x01, 0x0a, 0x16, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x3a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x51, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0xa4, 0x01, 0x0a, 0x17, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, - 0x32, 0xcb, 0x02, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x46, 0x0a, 0x09, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x1b, - 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45, - 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x69, 0x72, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x69, 
0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x69, 0x72, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x44, - 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, - 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, - 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_services_control_ir_service_proto_rawDescOnce sync.Once - file_pkg_services_control_ir_service_proto_rawDescData = file_pkg_services_control_ir_service_proto_rawDesc -) - -func file_pkg_services_control_ir_service_proto_rawDescGZIP() []byte { - file_pkg_services_control_ir_service_proto_rawDescOnce.Do(func() { - file_pkg_services_control_ir_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_service_proto_rawDescData) - }) - return file_pkg_services_control_ir_service_proto_rawDescData -} - -var file_pkg_services_control_ir_service_proto_msgTypes = make([]protoimpl.MessageInfo, 16) -var file_pkg_services_control_ir_service_proto_goTypes = []interface{}{ - (*HealthCheckRequest)(nil), // 0: ircontrol.HealthCheckRequest - (*HealthCheckResponse)(nil), // 1: ircontrol.HealthCheckResponse - (*TickEpochRequest)(nil), // 2: ircontrol.TickEpochRequest - (*TickEpochResponse)(nil), // 3: ircontrol.TickEpochResponse - (*RemoveNodeRequest)(nil), // 4: ircontrol.RemoveNodeRequest - (*RemoveNodeResponse)(nil), // 5: ircontrol.RemoveNodeResponse - (*RemoveContainerRequest)(nil), // 6: ircontrol.RemoveContainerRequest - (*RemoveContainerResponse)(nil), // 7: ircontrol.RemoveContainerResponse - (*HealthCheckRequest_Body)(nil), // 8: ircontrol.HealthCheckRequest.Body - (*HealthCheckResponse_Body)(nil), // 9: ircontrol.HealthCheckResponse.Body - (*TickEpochRequest_Body)(nil), // 10: ircontrol.TickEpochRequest.Body - (*TickEpochResponse_Body)(nil), // 11: ircontrol.TickEpochResponse.Body - (*RemoveNodeRequest_Body)(nil), // 12: ircontrol.RemoveNodeRequest.Body - (*RemoveNodeResponse_Body)(nil), // 13: ircontrol.RemoveNodeResponse.Body - (*RemoveContainerRequest_Body)(nil), // 14: ircontrol.RemoveContainerRequest.Body - (*RemoveContainerResponse_Body)(nil), // 15: ircontrol.RemoveContainerResponse.Body - (*Signature)(nil), // 16: ircontrol.Signature - (HealthStatus)(0), // 17: ircontrol.HealthStatus -} -var file_pkg_services_control_ir_service_proto_depIdxs = []int32{ - 8, // 0: ircontrol.HealthCheckRequest.body:type_name -> ircontrol.HealthCheckRequest.Body - 16, // 1: ircontrol.HealthCheckRequest.signature:type_name -> ircontrol.Signature - 9, // 2: ircontrol.HealthCheckResponse.body:type_name -> ircontrol.HealthCheckResponse.Body - 16, // 3: 
ircontrol.HealthCheckResponse.signature:type_name -> ircontrol.Signature - 10, // 4: ircontrol.TickEpochRequest.body:type_name -> ircontrol.TickEpochRequest.Body - 16, // 5: ircontrol.TickEpochRequest.signature:type_name -> ircontrol.Signature - 11, // 6: ircontrol.TickEpochResponse.body:type_name -> ircontrol.TickEpochResponse.Body - 16, // 7: ircontrol.TickEpochResponse.signature:type_name -> ircontrol.Signature - 12, // 8: ircontrol.RemoveNodeRequest.body:type_name -> ircontrol.RemoveNodeRequest.Body - 16, // 9: ircontrol.RemoveNodeRequest.signature:type_name -> ircontrol.Signature - 13, // 10: ircontrol.RemoveNodeResponse.body:type_name -> ircontrol.RemoveNodeResponse.Body - 16, // 11: ircontrol.RemoveNodeResponse.signature:type_name -> ircontrol.Signature - 14, // 12: ircontrol.RemoveContainerRequest.body:type_name -> ircontrol.RemoveContainerRequest.Body - 16, // 13: ircontrol.RemoveContainerRequest.signature:type_name -> ircontrol.Signature - 15, // 14: ircontrol.RemoveContainerResponse.body:type_name -> ircontrol.RemoveContainerResponse.Body - 16, // 15: ircontrol.RemoveContainerResponse.signature:type_name -> ircontrol.Signature - 17, // 16: ircontrol.HealthCheckResponse.Body.health_status:type_name -> ircontrol.HealthStatus - 0, // 17: ircontrol.ControlService.HealthCheck:input_type -> ircontrol.HealthCheckRequest - 2, // 18: ircontrol.ControlService.TickEpoch:input_type -> ircontrol.TickEpochRequest - 4, // 19: ircontrol.ControlService.RemoveNode:input_type -> ircontrol.RemoveNodeRequest - 6, // 20: ircontrol.ControlService.RemoveContainer:input_type -> ircontrol.RemoveContainerRequest - 1, // 21: ircontrol.ControlService.HealthCheck:output_type -> ircontrol.HealthCheckResponse - 3, // 22: ircontrol.ControlService.TickEpoch:output_type -> ircontrol.TickEpochResponse - 5, // 23: ircontrol.ControlService.RemoveNode:output_type -> ircontrol.RemoveNodeResponse - 7, // 24: ircontrol.ControlService.RemoveContainer:output_type -> ircontrol.RemoveContainerResponse - 21, // [21:25] is the sub-list for method output_type - 17, // [17:21] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name -} - -func init() { file_pkg_services_control_ir_service_proto_init() } -func file_pkg_services_control_ir_service_proto_init() { - if File_pkg_services_control_ir_service_proto != nil { - return - } - file_pkg_services_control_ir_types_proto_init() - if !protoimpl.UnsafeEnabled { - file_pkg_services_control_ir_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TickEpochRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TickEpochResponse); i { - 
case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveNodeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveNodeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveContainerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveContainerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TickEpochRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TickEpochResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveNodeRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveNodeResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveContainerRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_ir_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveContainerResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - 
type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_services_control_ir_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 16, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_pkg_services_control_ir_service_proto_goTypes, - DependencyIndexes: file_pkg_services_control_ir_service_proto_depIdxs, - MessageInfos: file_pkg_services_control_ir_service_proto_msgTypes, - }.Build() - File_pkg_services_control_ir_service_proto = out.File - file_pkg_services_control_ir_service_proto_rawDesc = nil - file_pkg_services_control_ir_service_proto_goTypes = nil - file_pkg_services_control_ir_service_proto_depIdxs = nil -} diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go index e22d0013f..d27746263 100644 --- a/pkg/services/control/ir/service_frostfs.pb.go +++ b/pkg/services/control/ir/service_frostfs.pb.go @@ -2,7 +2,27 @@ package control -import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" +import ( + json "encoding/json" + fmt "fmt" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" + easyproto "github.com/VictoriaMetrics/easyproto" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" + strconv "strconv" +) + +type HealthCheckRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil) + _ json.Marshaler = (*HealthCheckRequest_Body)(nil) + _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil) +) // StableSize returns the size of x in protobuf format. // @@ -14,18 +34,93 @@ func (x *HealthCheckRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthCheckRequest struct { + Body *HealthCheckRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil) + _ json.Marshaler = (*HealthCheckRequest)(nil) + _ json.Unmarshaler = (*HealthCheckRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -38,27 +133,6 @@ func (x *HealthCheckRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -74,13 +148,175 @@ func (x *HealthCheckRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *HealthCheckRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(HealthCheckRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) { + x.Body = v +} +func (x *HealthCheckRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *HealthCheckRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *HealthCheckRequest_Body + f = new(HealthCheckRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthCheckResponse_Body struct { + HealthStatus HealthStatus `json:"healthStatus"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil) + _ json.Marshaler = (*HealthCheckResponse_Body)(nil) + _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -92,26 +328,152 @@ func (x *HealthCheckResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. 
-// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.EnumMarshal(1, buf[offset:], int32(x.HealthStatus)) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if int32(x.HealthStatus) != 0 { + mm.AppendInt32(1, int32(x.HealthStatus)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body") + } + switch fc.FieldNum { + case 1: // HealthStatus + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "HealthStatus") + } + x.HealthStatus = HealthStatus(data) + } + } + return nil +} +func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus { + if x != nil { + return x.HealthStatus + } + return 0 +} +func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) { + x.HealthStatus = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"healthStatus\":" + out.RawString(prefix) + v := int32(x.HealthStatus) + if vv, ok := HealthStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "healthStatus": + { + var f HealthStatus + var parsedValue HealthStatus + switch v := in.Interface().(type) { + case string: + if vv, ok := HealthStatus_value[v]; ok { + parsedValue = HealthStatus(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = HealthStatus(vv) + case float64: + parsedValue = HealthStatus(v) + } + f = parsedValue + x.HealthStatus = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthCheckResponse struct { + Body *HealthCheckResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil) + _ json.Marshaler = (*HealthCheckResponse)(nil) + _ json.Unmarshaler = (*HealthCheckResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -124,27 +486,6 @@ func (x *HealthCheckResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -160,13 +501,175 @@ func (x *HealthCheckResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *HealthCheckResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(HealthCheckResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) { + x.Body = v +} +func (x *HealthCheckResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *HealthCheckResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *HealthCheckResponse_Body + f = new(HealthCheckResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TickEpochRequest_Body struct { + Vub uint32 `json:"vub"` +} + +var ( + _ encoding.ProtoMarshaler = (*TickEpochRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*TickEpochRequest_Body)(nil) + _ json.Marshaler = (*TickEpochRequest_Body)(nil) + _ json.Unmarshaler = (*TickEpochRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -178,26 +681,139 @@ func (x *TickEpochRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. 
-// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TickEpochRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt32Marshal(1, buf[offset:], x.Vub) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TickEpochRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TickEpochRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Vub != 0 { + mm.AppendUint32(1, x.Vub) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *TickEpochRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TickEpochRequest_Body") + } + switch fc.FieldNum { + case 1: // Vub + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Vub") + } + x.Vub = data + } + } + return nil +} +func (x *TickEpochRequest_Body) GetVub() uint32 { + if x != nil { + return x.Vub + } + return 0 +} +func (x *TickEpochRequest_Body) SetVub(v uint32) { + x.Vub = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *TickEpochRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) + out.Uint32(x.Vub) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *TickEpochRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "vub": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Vub = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TickEpochRequest struct { + Body *TickEpochRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*TickEpochRequest)(nil) + _ encoding.ProtoUnmarshaler = (*TickEpochRequest)(nil) + _ json.Marshaler = (*TickEpochRequest)(nil) + _ json.Unmarshaler = (*TickEpochRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
@@ -210,27 +826,6 @@ func (x *TickEpochRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TickEpochRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -246,13 +841,175 @@ func (x *TickEpochRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *TickEpochRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *TickEpochRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TickEpochRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TickEpochRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *TickEpochRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TickEpochRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(TickEpochRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) { + x.Body = v +} +func (x *TickEpochRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *TickEpochRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *TickEpochRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *TickEpochRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TickEpochRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *TickEpochRequest_Body + f = new(TickEpochRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TickEpochResponse_Body struct { + Vub uint32 `json:"vub"` +} + +var ( + _ encoding.ProtoMarshaler = (*TickEpochResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*TickEpochResponse_Body)(nil) + _ json.Marshaler = (*TickEpochResponse_Body)(nil) + _ json.Unmarshaler = (*TickEpochResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -264,26 +1021,139 @@ func (x *TickEpochResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TickEpochResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt32Marshal(1, buf[offset:], x.Vub) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TickEpochResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TickEpochResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Vub != 0 { + mm.AppendUint32(1, x.Vub) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *TickEpochResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TickEpochResponse_Body") + } + switch fc.FieldNum { + case 1: // Vub + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Vub") + } + x.Vub = data + } + } + return nil +} +func (x *TickEpochResponse_Body) GetVub() uint32 { + if x != nil { + return x.Vub + } + return 0 +} +func (x *TickEpochResponse_Body) SetVub(v uint32) { + x.Vub = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *TickEpochResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) + out.Uint32(x.Vub) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *TickEpochResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "vub": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Vub = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TickEpochResponse struct { + Body *TickEpochResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*TickEpochResponse)(nil) + _ encoding.ProtoUnmarshaler = (*TickEpochResponse)(nil) + _ json.Marshaler = (*TickEpochResponse)(nil) + _ json.Unmarshaler = (*TickEpochResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -296,27 +1166,6 @@ func (x *TickEpochResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TickEpochResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -332,13 +1181,176 @@ func (x *TickEpochResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *TickEpochResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *TickEpochResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TickEpochResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TickEpochResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *TickEpochResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TickEpochResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(TickEpochResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) { + x.Body = v +} +func (x *TickEpochResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *TickEpochResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *TickEpochResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *TickEpochResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TickEpochResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *TickEpochResponse_Body + f = new(TickEpochResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveNodeRequest_Body struct { + Key []byte `json:"key"` + Vub uint32 `json:"vub"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveNodeRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest_Body)(nil) + _ json.Marshaler = (*RemoveNodeRequest_Body)(nil) + _ json.Unmarshaler = (*RemoveNodeRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -351,27 +1363,183 @@ func (x *RemoveNodeRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveNodeRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.Key) - offset += proto.UInt32Marshal(2, buf[offset:], x.Vub) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveNodeRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveNodeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Key) != 0 { + mm.AppendBytes(1, x.Key) + } + if x.Vub != 0 { + mm.AppendUint32(2, x.Vub) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *RemoveNodeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest_Body") + } + switch fc.FieldNum { + case 1: // Key + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Key") + } + x.Key = data + case 2: // Vub + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Vub") + } + x.Vub = data + } + } + return nil +} +func (x *RemoveNodeRequest_Body) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} +func (x *RemoveNodeRequest_Body) SetKey(v []byte) { + x.Key = v +} +func (x *RemoveNodeRequest_Body) GetVub() uint32 { + if x != nil { + return x.Vub + } + return 0 +} +func (x *RemoveNodeRequest_Body) SetVub(v uint32) { + x.Vub = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveNodeRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) + out.Uint32(x.Vub) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveNodeRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Key = f + } + case "vub": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Vub = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveNodeRequest struct { + Body *RemoveNodeRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveNodeRequest)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest)(nil) + _ json.Marshaler = (*RemoveNodeRequest)(nil) + _ json.Unmarshaler = (*RemoveNodeRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -384,27 +1552,6 @@ func (x *RemoveNodeRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. 
-// -// Structures with the same field values have the same binary format. -func (x *RemoveNodeRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -420,13 +1567,175 @@ func (x *RemoveNodeRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveNodeRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveNodeRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveNodeRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveNodeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveNodeRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveNodeRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) { + x.Body = v +} +func (x *RemoveNodeRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveNodeRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *RemoveNodeRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveNodeRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveNodeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveNodeRequest_Body + f = new(RemoveNodeRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveNodeResponse_Body struct { + Vub uint32 `json:"vub"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveNodeResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse_Body)(nil) + _ json.Marshaler = (*RemoveNodeResponse_Body)(nil) + _ json.Unmarshaler = (*RemoveNodeResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -438,26 +1747,139 @@ func (x *RemoveNodeResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveNodeResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt32Marshal(1, buf[offset:], x.Vub) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveNodeResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveNodeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Vub != 0 { + mm.AppendUint32(1, x.Vub) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *RemoveNodeResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse_Body") + } + switch fc.FieldNum { + case 1: // Vub + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Vub") + } + x.Vub = data + } + } + return nil +} +func (x *RemoveNodeResponse_Body) GetVub() uint32 { + if x != nil { + return x.Vub + } + return 0 +} +func (x *RemoveNodeResponse_Body) SetVub(v uint32) { + x.Vub = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveNodeResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) + out.Uint32(x.Vub) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveNodeResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "vub": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Vub = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveNodeResponse struct { + Body *RemoveNodeResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveNodeResponse)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse)(nil) + _ json.Marshaler = (*RemoveNodeResponse)(nil) + _ json.Unmarshaler = (*RemoveNodeResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -470,27 +1892,6 @@ func (x *RemoveNodeResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveNodeResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -506,13 +1907,177 @@ func (x *RemoveNodeResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveNodeResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveNodeResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveNodeResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveNodeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveNodeResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveNodeResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) { + x.Body = v +} +func (x *RemoveNodeResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveNodeResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveNodeResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveNodeResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveNodeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveNodeResponse_Body + f = new(RemoveNodeResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveContainerRequest_Body struct { + ContainerId []byte `json:"containerId"` + Owner []byte `json:"owner"` + Vub uint32 `json:"vub"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveContainerRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest_Body)(nil) + _ json.Marshaler = (*RemoveContainerRequest_Body)(nil) + _ json.Unmarshaler = (*RemoveContainerRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -526,28 +2091,227 @@ func (x *RemoveContainerRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveContainerRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.BytesMarshal(2, buf[offset:], x.Owner) - offset += proto.UInt32Marshal(3, buf[offset:], x.Vub) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveContainerRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveContainerRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.Owner) != 0 { + mm.AppendBytes(2, x.Owner) + } + if x.Vub != 0 { + mm.AppendUint32(3, x.Vub) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *RemoveContainerRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // Owner + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Owner") + } + x.Owner = data + case 3: // Vub + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Vub") + } + x.Vub = data + } + } + return nil +} +func (x *RemoveContainerRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *RemoveContainerRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *RemoveContainerRequest_Body) GetOwner() []byte { + if x != nil { + return x.Owner + } + return nil +} +func (x *RemoveContainerRequest_Body) SetOwner(v []byte) { + x.Owner = v +} +func (x *RemoveContainerRequest_Body) GetVub() uint32 { + if x != nil { + return x.Vub + } + return 0 +} +func (x *RemoveContainerRequest_Body) SetVub(v uint32) { + x.Vub = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveContainerRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"owner\":" + out.RawString(prefix) + if x.Owner != nil { + out.Base64Bytes(x.Owner) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) + out.Uint32(x.Vub) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
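The MarshalProtobuf/UnmarshalProtobuf pair above replaces the old StableSize-then-StableMarshal flow: the pooled easyproto marshaler grows the destination itself, so callers no longer pre-size buffers. A hedged round-trip sketch (rawCID and rawOwner stand in for real encoded values):

	func roundTrip(rawCID, rawOwner []byte) (*RemoveContainerRequest_Body, error) {
		body := new(RemoveContainerRequest_Body)
		body.SetContainerId(rawCID)
		body.SetOwner(rawOwner)
		body.SetVub(100)

		data := body.MarshalProtobuf(nil) // nil dst: a fresh slice is returned

		decoded := new(RemoveContainerRequest_Body)
		if err := decoded.UnmarshalProtobuf(data); err != nil {
			return nil, err // easyproto reports the first unreadable field
		}
		return decoded, nil
	}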
+func (x *RemoveContainerRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "owner": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Owner = f + } + case "vub": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Vub = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveContainerRequest struct { + Body *RemoveContainerRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveContainerRequest)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest)(nil) + _ json.Marshaler = (*RemoveContainerRequest)(nil) + _ json.Unmarshaler = (*RemoveContainerRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -560,27 +2324,6 @@ func (x *RemoveContainerRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveContainerRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -596,13 +2339,175 @@ func (x *RemoveContainerRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveContainerRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveContainerRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
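Note how the easyjson code above treats byte fields: a non-nil slice is written as base64, a nil slice falls back to an empty string, and an empty string decodes back to a nil slice, while vub travels as a plain JSON number parsed with strconv.ParseUint. A small sketch of the resulting shape (values illustrative):

	func jsonShape() error {
		body := new(RemoveContainerRequest_Body)
		body.SetContainerId([]byte{0x01, 0x02})
		body.SetVub(42)

		js, err := body.MarshalJSON()
		if err != nil {
			return err
		}
		// js == `{"containerId":"AQI=","owner":"","vub":42}`
		// (Owner is nil, so the empty-string fallback is emitted.)

		decoded := new(RemoveContainerRequest_Body)
		return decoded.UnmarshalJSON(js) // "" decodes back to a nil Owner
	}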
+func (x *RemoveContainerRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveContainerRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveContainerRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveContainerRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveContainerRequest) GetBody() *RemoveContainerRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveContainerRequest) SetBody(v *RemoveContainerRequest_Body) { + x.Body = v +} +func (x *RemoveContainerRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveContainerRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveContainerRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveContainerRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveContainerRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveContainerRequest_Body + f = new(RemoveContainerRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveContainerResponse_Body struct { + Vub uint32 `json:"vub"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveContainerResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse_Body)(nil) + _ json.Marshaler = (*RemoveContainerResponse_Body)(nil) + _ json.Unmarshaler = (*RemoveContainerResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -614,26 +2519,139 @@ func (x *RemoveContainerResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveContainerResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt32Marshal(1, buf[offset:], x.Vub) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveContainerResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveContainerResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Vub != 0 { + mm.AppendUint32(1, x.Vub) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveContainerResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse_Body") + } + switch fc.FieldNum { + case 1: // Vub + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Vub") + } + x.Vub = data + } + } + return nil +} +func (x *RemoveContainerResponse_Body) GetVub() uint32 { + if x != nil { + return x.Vub + } + return 0 +} +func (x *RemoveContainerResponse_Body) SetVub(v uint32) { + x.Vub = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *RemoveContainerResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) + out.Uint32(x.Vub) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveContainerResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "vub": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Vub = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveContainerResponse struct { + Body *RemoveContainerResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveContainerResponse)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse)(nil) + _ json.Marshaler = (*RemoveContainerResponse)(nil) + _ json.Unmarshaler = (*RemoveContainerResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -646,27 +2664,6 @@ func (x *RemoveContainerResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveContainerResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -682,9 +2679,160 @@ func (x *RemoveContainerResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveContainerResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveContainerResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *RemoveContainerResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *RemoveContainerResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveContainerResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveContainerResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveContainerResponse) GetBody() *RemoveContainerResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveContainerResponse) SetBody(v *RemoveContainerResponse_Body) { + x.Body = v +} +func (x *RemoveContainerResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveContainerResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveContainerResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveContainerResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveContainerResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveContainerResponse_Body + f = new(RemoveContainerResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } } diff --git a/pkg/services/control/ir/service_grpc.pb.go b/pkg/services/control/ir/service_grpc.pb.go index 724149c44..336bf5f70 100644 --- a/pkg/services/control/ir/service_grpc.pb.go +++ b/pkg/services/control/ir/service_grpc.pb.go @@ -35,7 +35,8 @@ type ControlServiceClient interface { TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error) // Forces a node removal to be signaled by the IR node with high probability. RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) - // Forces a container removal to be signaled by the IR node with high probability. + // Forces a container removal to be signaled by the IR node with high + // probability. RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error) } @@ -93,7 +94,8 @@ type ControlServiceServer interface { TickEpoch(context.Context, *TickEpochRequest) (*TickEpochResponse, error) // Forces a node removal to be signaled by the IR node with high probability. RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) - // Forces a container removal to be signaled by the IR node with high probability. + // Forces a container removal to be signaled by the IR node with high + // probability. 
RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error) } diff --git a/pkg/services/control/ir/service_test.go b/pkg/services/control/ir/service_test.go deleted file mode 100644 index 54eef5148..000000000 --- a/pkg/services/control/ir/service_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package control_test - -import ( - "testing" - - control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" -) - -type protoMessage interface { - StableMarshal([]byte) []byte - proto.Message -} - -func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) { - require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2)) - - require.True(t, cmp(m1, m2)) -} - -func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) { - testStableMarshal(t, - generateHealthCheckResponseBody(), - new(control.HealthCheckResponse_Body), - func(m1, m2 protoMessage) bool { - return equalHealthCheckResponseBodies( - m1.(*control.HealthCheckResponse_Body), - m2.(*control.HealthCheckResponse_Body), - ) - }, - ) -} - -func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body { - body := new(control.HealthCheckResponse_Body) - body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN) - - return body -} - -func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool { - return b1.GetHealthStatus() == b2.GetHealthStatus() -} diff --git a/pkg/services/control/ir/types.go b/pkg/services/control/ir/types.go deleted file mode 100644 index 97ffd3ce3..000000000 --- a/pkg/services/control/ir/types.go +++ /dev/null @@ -1,15 +0,0 @@ -package control - -// SetKey sets public key used for signing. -func (x *Signature) SetKey(v []byte) { - if x != nil { - x.Key = v - } -} - -// SetSign sets binary signature. -func (x *Signature) SetSign(v []byte) { - if x != nil { - x.Sign = v - } -} diff --git a/pkg/services/control/ir/types.pb.go b/pkg/services/control/ir/types.pb.go deleted file mode 100644 index 828814b25..000000000 --- a/pkg/services/control/ir/types.pb.go +++ /dev/null @@ -1,224 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.0 -// source: pkg/services/control/ir/types.proto - -package control - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Health status of the IR application. -type HealthStatus int32 - -const ( - // Undefined status, default value. - HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0 - // IR application is starting. - HealthStatus_STARTING HealthStatus = 1 - // IR application is started and serves all services. - HealthStatus_READY HealthStatus = 2 - // IR application is shutting down. - HealthStatus_SHUTTING_DOWN HealthStatus = 3 -) - -// Enum value maps for HealthStatus. 
-var ( - HealthStatus_name = map[int32]string{ - 0: "HEALTH_STATUS_UNDEFINED", - 1: "STARTING", - 2: "READY", - 3: "SHUTTING_DOWN", - } - HealthStatus_value = map[string]int32{ - "HEALTH_STATUS_UNDEFINED": 0, - "STARTING": 1, - "READY": 2, - "SHUTTING_DOWN": 3, - } -) - -func (x HealthStatus) Enum() *HealthStatus { - p := new(HealthStatus) - *p = x - return p -} - -func (x HealthStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HealthStatus) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_control_ir_types_proto_enumTypes[0].Descriptor() -} - -func (HealthStatus) Type() protoreflect.EnumType { - return &file_pkg_services_control_ir_types_proto_enumTypes[0] -} - -func (x HealthStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HealthStatus.Descriptor instead. -func (HealthStatus) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0} -} - -// Signature of some message. -type Signature struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Public key used for signing. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Binary signature. - Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"` -} - -func (x *Signature) Reset() { - *x = Signature{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_ir_types_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Signature) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Signature) ProtoMessage() {} - -func (x *Signature) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_ir_types_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Signature.ProtoReflect.Descriptor instead. 
-func (*Signature) Descriptor() ([]byte, []int) { - return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0} -} - -func (x *Signature) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *Signature) GetSign() []byte { - if x != nil { - return x.Sign - } - return nil -} - -var File_pkg_services_control_ir_types_proto protoreflect.FileDescriptor - -var file_pkg_services_control_ir_types_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2a, 0x57, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c, - 0x54, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, - 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x11, - 0x0a, 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, - 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, - 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, - 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_services_control_ir_types_proto_rawDescOnce sync.Once - file_pkg_services_control_ir_types_proto_rawDescData = file_pkg_services_control_ir_types_proto_rawDesc -) - -func file_pkg_services_control_ir_types_proto_rawDescGZIP() []byte { - file_pkg_services_control_ir_types_proto_rawDescOnce.Do(func() { - file_pkg_services_control_ir_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_types_proto_rawDescData) - }) - return file_pkg_services_control_ir_types_proto_rawDescData -} - -var file_pkg_services_control_ir_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_pkg_services_control_ir_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pkg_services_control_ir_types_proto_goTypes = []interface{}{ - (HealthStatus)(0), // 0: ircontrol.HealthStatus - (*Signature)(nil), // 1: ircontrol.Signature -} -var file_pkg_services_control_ir_types_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_pkg_services_control_ir_types_proto_init() } -func file_pkg_services_control_ir_types_proto_init() { - if File_pkg_services_control_ir_types_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_pkg_services_control_ir_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Signature); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_services_control_ir_types_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pkg_services_control_ir_types_proto_goTypes, - DependencyIndexes: file_pkg_services_control_ir_types_proto_depIdxs, - EnumInfos: file_pkg_services_control_ir_types_proto_enumTypes, - MessageInfos: file_pkg_services_control_ir_types_proto_msgTypes, - }.Build() - File_pkg_services_control_ir_types_proto = out.File - file_pkg_services_control_ir_types_proto_rawDesc = nil - file_pkg_services_control_ir_types_proto_goTypes = nil - file_pkg_services_control_ir_types_proto_depIdxs = nil -} diff --git a/pkg/services/control/ir/types.proto b/pkg/services/control/ir/types.proto index 9b6731cf8..901a55918 100644 --- a/pkg/services/control/ir/types.proto +++ b/pkg/services/control/ir/types.proto @@ -26,4 +26,7 @@ enum HealthStatus { // IR application is shutting down. SHUTTING_DOWN = 3; + + // IR application is reconfiguring. + RECONFIGURING = 4; } diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go index ef2fc458e..407eec6ad 100644 --- a/pkg/services/control/ir/types_frostfs.pb.go +++ b/pkg/services/control/ir/types_frostfs.pb.go @@ -2,7 +2,70 @@ package control -import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" +import ( + json "encoding/json" + fmt "fmt" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" + easyproto "github.com/VictoriaMetrics/easyproto" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" + strconv "strconv" +) + +type HealthStatus int32 + +const ( + HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0 + HealthStatus_STARTING HealthStatus = 1 + HealthStatus_READY HealthStatus = 2 + HealthStatus_SHUTTING_DOWN HealthStatus = 3 + HealthStatus_RECONFIGURING HealthStatus = 4 +) + +var ( + HealthStatus_name = map[int32]string{ + 0: "HEALTH_STATUS_UNDEFINED", + 1: "STARTING", + 2: "READY", + 3: "SHUTTING_DOWN", + 4: "RECONFIGURING", + } + HealthStatus_value = map[string]int32{ + "HEALTH_STATUS_UNDEFINED": 0, + "STARTING": 1, + "READY": 2, + "SHUTTING_DOWN": 3, + "RECONFIGURING": 4, + } +) + +func (x HealthStatus) String() string { + if v, ok := HealthStatus_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *HealthStatus) FromString(s string) bool { + if v, ok := HealthStatus_value[s]; ok { + *x = HealthStatus(v) + return true + } + return false +} + +type Signature struct { + Key []byte `json:"key"` + Sign []byte `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*Signature)(nil) + _ encoding.ProtoUnmarshaler = (*Signature)(nil) + _ json.Marshaler = (*Signature)(nil) + _ json.Unmarshaler = (*Signature)(nil) +) // StableSize returns the size of x in protobuf format. 
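Unlike the protoc-generated enum it replaces, the hand-rolled HealthStatus above keeps only the name/value maps plus two helpers: String falls back to the decimal code for unknown values, and FromString reports whether the name was recognized. A short sketch (assumes fmt is imported):

	func statusDemo() {
		var st HealthStatus
		if st.FromString("RECONFIGURING") {
			fmt.Println(st) // prints: RECONFIGURING
		}
		fmt.Println(HealthStatus(42)) // unknown code, String falls back to: 42
	}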
// @@ -16,23 +79,169 @@ func (x *Signature) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *Signature) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.Key) - offset += proto.BytesMarshal(2, buf[offset:], x.Sign) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *Signature) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Key) != 0 { + mm.AppendBytes(1, x.Key) + } + if len(x.Sign) != 0 { + mm.AppendBytes(2, x.Sign) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *Signature) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "Signature") + } + switch fc.FieldNum { + case 1: // Key + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Key") + } + x.Key = data + case 2: // Sign + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Sign") + } + x.Sign = data + } + } + return nil +} +func (x *Signature) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} +func (x *Signature) SetKey(v []byte) { + x.Key = v +} +func (x *Signature) GetSign() []byte { + if x != nil { + return x.Sign + } + return nil +} +func (x *Signature) SetSign(v []byte) { + x.Sign = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *Signature) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + if x.Sign != nil { + out.Base64Bytes(x.Sign) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *Signature) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Key = f + } + case "signature": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Sign = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } } diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index a90e58a65..0c4236d0e 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -1,8 +1,10 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common" + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" ) const serviceName = "control.ControlService" @@ -15,7 +17,6 @@ const ( rpcListShards = "ListShards" rpcSetShardMode = "SetShardMode" rpcSynchronizeTree = "SynchronizeTree" - rpcEvacuateShard = "EvacuateShard" rpcStartShardEvacuation = "StartShardEvacuation" rpcGetShardEvacuationStatus = "GetShardEvacuationStatus" rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus" @@ -30,6 +31,8 @@ const ( rpcSealWriteCache = "SealWriteCache" rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides" rpcDetachShards = "DetachShards" + rpcStartShardRebuild = "StartShardRebuild" + rpcListShardsForObject = "ListShardsForObject" ) // HealthCheck executes ControlService.HealthCheck RPC. @@ -73,6 +76,7 @@ func SetNetmapStatus( // GetNetmapStatus executes ControlService.GetNetmapStatus RPC. func GetNetmapStatus( + _ context.Context, cli *client.Client, req *GetNetmapStatusRequest, opts ...client.CallOption, @@ -161,19 +165,6 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl return wResp.message, nil } -// EvacuateShard executes ControlService.EvacuateShard RPC. -func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) { - wResp := newResponseWrapper[EvacuateShardResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - // StartShardEvacuation executes ControlService.StartShardEvacuation RPC. func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) { wResp := newResponseWrapper[StartShardEvacuationResponse]() @@ -291,7 +282,7 @@ func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverride return wResp.message, nil } -// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC. +// GetChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC. 
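Each client wrapper in rpc.go, including GetChainLocalOverride just below, follows the same unary-call shape: wrap the request, send it with the method info, unwrap the typed response. A hedged template for a future method (Example, ExampleRequest, and ExampleResponse are hypothetical placeholders, not part of this patch):

	// Hypothetical wrapper, pattern only.
	func Example(cli *client.Client, req *ExampleRequest, opts ...client.CallOption) (*ExampleResponse, error) {
		wResp := newResponseWrapper[ExampleResponse]()
		wReq := &requestWrapper{m: req}

		err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, "Example"), wReq, wResp, opts...)
		if err != nil {
			return nil, err
		}
		return wResp.message, nil
	}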
func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) { wResp := newResponseWrapper[GetChainLocalOverrideResponse]() wReq := &requestWrapper{m: req} @@ -361,3 +352,35 @@ func DetachShards( return wResp.message, nil } + +// StartShardRebuild executes ControlService.StartShardRebuild RPC. +func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts ...client.CallOption) (*StartShardRebuildResponse, error) { + wResp := newResponseWrapper[StartShardRebuildResponse]() + wReq := &requestWrapper{m: req} + + err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardRebuild), wReq, wResp, opts...) + if err != nil { + return nil, err + } + + return wResp.message, nil +} + +// ListShardsForObject executes ControlService.ListShardsForObject RPC. +func ListShardsForObject( + cli *client.Client, + req *ListShardsForObjectRequest, + opts ...client.CallOption, +) (*ListShardsForObjectResponse, error) { + wResp := newResponseWrapper[ListShardsForObjectResponse]() + + wReq := &requestWrapper{ + m: req, + } + err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...) + if err != nil { + return nil, err + } + + return wResp.message, nil +} diff --git a/pkg/services/control/server/ctrlmessage/sign.go b/pkg/services/control/server/ctrlmessage/sign.go index 31425b337..d9d5c5f5e 100644 --- a/pkg/services/control/server/ctrlmessage/sign.go +++ b/pkg/services/control/server/ctrlmessage/sign.go @@ -4,8 +4,8 @@ import ( "crypto/ecdsa" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" ) diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go index a4111bddb..ffd36962b 100644 --- a/pkg/services/control/server/detach_shards.go +++ b/pkg/services/control/server/detach_shards.go @@ -11,7 +11,7 @@ import ( "google.golang.org/grpc/status" ) -func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) { +func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) { err := s.isValidRequest(req) if err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) @@ -19,7 +19,7 @@ func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsReques shardIDs := s.getShardIDList(req.GetBody().GetShard_ID()) - if err := s.s.DetachShards(shardIDs); err != nil { + if err := s.s.DetachShards(ctx, shardIDs); err != nil { if errors.As(err, new(logicerr.Logical)) { return nil, status.Error(codes.InvalidArgument, err.Error()) } diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go deleted file mode 100644 index 0ba8be765..000000000 --- a/pkg/services/control/server/evacuate.go +++ /dev/null @@ -1,188 +0,0 @@ -package control - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes") - -func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - prm := engine.EvacuateShardPrm{ - ShardID: s.getShardIDList(req.GetBody().GetShard_ID()), - IgnoreErrors: req.GetBody().GetIgnoreErrors(), - ObjectsHandler: s.replicateObject, - Scope: engine.EvacuateScopeObjects, - } - - res, err := s.s.Evacuate(ctx, prm) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &control.EvacuateShardResponse{ - Body: &control.EvacuateShardResponse_Body{ - Count: uint32(res.ObjectsEvacuated()), - }, - } - - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { - cid, ok := obj.ContainerID() - if !ok { - // Return nil to prevent situations where a shard can't be evacuated - // because of a single bad/corrupted object. 
- return false, nil - } - - nodes, err := s.getContainerNodes(cid) - if err != nil { - return false, err - } - - if len(nodes) == 0 { - return false, nil - } - - var res replicatorResult - task := replicator.Task{ - NumCopies: 1, - Addr: addr, - Obj: obj, - Nodes: nodes, - } - s.replicator.HandleReplicationTask(ctx, task, &res) - - if res.count == 0 { - return false, errors.New("object was not replicated") - } - return true, nil -} - -func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) { - nodes, err := s.getContainerNodes(contID) - if err != nil { - return false, "", err - } - if len(nodes) == 0 { - return false, "", nil - } - - for _, node := range nodes { - err = s.replicateTreeToNode(ctx, forest, contID, treeID, node) - if err == nil { - return true, hex.EncodeToString(node.PublicKey()), nil - } - } - return false, "", err -} - -func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error { - rawCID := make([]byte, sha256.Size) - contID.Encode(rawCID) - - var height uint64 - for { - op, err := forest.TreeGetOpLog(ctx, contID, treeID, height) - if err != nil { - return err - } - - if op.Time == 0 { - return nil - } - - req := &tree.ApplyRequest{ - Body: &tree.ApplyRequest_Body{ - ContainerId: rawCID, - TreeId: treeID, - Operation: &tree.LogMove{ - ParentId: op.Parent, - Meta: op.Meta.Bytes(), - ChildId: op.Child, - }, - }, - } - - err = tree.SignMessage(req, s.key) - if err != nil { - return fmt.Errorf("can't message apply request: %w", err) - } - - err = s.treeService.ReplicateTreeOp(ctx, node, req) - if err != nil { - return err - } - - height = op.Time + 1 - } -} - -func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) { - nm, err := s.netMapSrc.GetNetMap(0) - if err != nil { - return nil, err - } - - c, err := s.cnrSrc.Get(contID) - if err != nil { - return nil, err - } - - binCnr := make([]byte, sha256.Size) - contID.Encode(binCnr) - - ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr) - if err != nil { - return nil, errFailedToBuildListOfContainerNodes - } - - nodes := placement.FlattenNodes(ns) - bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes() - for i := 0; i < len(nodes); i++ { - if bytes.Equal(nodes[i].PublicKey(), bs) { - copy(nodes[i:], nodes[i+1:]) - nodes = nodes[:len(nodes)-1] - } - } - return nodes, nil -} - -type replicatorResult struct { - count int -} - -// SubmitSuccessfulReplication implements the replicator.TaskResult interface. 
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) { - r.count++ -} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index b829573ec..f3ba9015e 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -1,17 +1,32 @@ package control import ( + "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" + "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) +var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes") + func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) { err := s.isValidRequest(req) if err != nil { @@ -23,16 +38,17 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha } prm := engine.EvacuateShardPrm{ - ShardID: s.getShardIDList(req.GetBody().GetShard_ID()), - IgnoreErrors: req.GetBody().GetIgnoreErrors(), - ObjectsHandler: s.replicateObject, - TreeHandler: s.replicateTree, - Async: true, - Scope: engine.EvacuateScope(req.GetBody().GetScope()), + ShardID: s.getShardIDList(req.GetBody().GetShard_ID()), + IgnoreErrors: req.GetBody().GetIgnoreErrors(), + ObjectsHandler: s.replicateObject, + TreeHandler: s.replicateTree, + Scope: engine.EvacuateScope(req.GetBody().GetScope()), + ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(), + ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(), + RepOneOnly: req.GetBody().GetRepOneOnly(), } - _, err = s.s.Evacuate(ctx, prm) - if err != nil { + if err = s.s.Evacuate(ctx, prm); err != nil { var logicalErr logicerr.Logical if errors.As(err, &logicalErr) { return nil, status.Error(codes.Aborted, err.Error()) @@ -101,6 +117,9 @@ func (s *Server) StopShardEvacuation(ctx context.Context, req *control.StopShard if err != nil { return nil, status.Error(codes.Internal, err.Error()) } + + s.s.ResetEvacuationStatusForShards() + return resp, nil } @@ -129,3 +148,133 @@ func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.Re } return resp, nil } + +func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { + cid, ok := obj.ContainerID() + if !ok { + // Return nil to prevent situations where a shard can't be evacuated + // because of a single bad/corrupted object. 
+		return false, nil
+	}
+
+	nodes, err := s.getContainerNodes(ctx, cid)
+	if err != nil {
+		return false, err
+	}
+
+	if len(nodes) == 0 {
+		return false, nil
+	}
+
+	var res replicatorResult
+	task := replicator.Task{
+		NumCopies: 1,
+		Addr:      addr,
+		Obj:       obj,
+		Nodes:     nodes,
+	}
+	s.replicator.HandleReplicationTask(ctx, task, &res)
+
+	if res.count == 0 {
+		return false, errors.New("object was not replicated")
+	}
+	return true, nil
+}
+
+func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
+	nodes, err := s.getContainerNodes(ctx, contID)
+	if err != nil {
+		return false, "", err
+	}
+	if len(nodes) == 0 {
+		return false, "", nil
+	}
+
+	for _, node := range nodes {
+		err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
+		if err == nil {
+			return true, hex.EncodeToString(node.PublicKey()), nil
+		}
+	}
+	return false, "", err
+}
+
+func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
+	rawCID := make([]byte, sha256.Size)
+	contID.Encode(rawCID)
+
+	var height uint64
+	for {
+		op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
+		if err != nil {
+			return err
+		}
+
+		if op.Time == 0 {
+			return nil
+		}
+
+		req := &tree.ApplyRequest{
+			Body: &tree.ApplyRequest_Body{
+				ContainerId: rawCID,
+				TreeId:      treeID,
+				Operation: &tree.LogMove{
+					ParentId: op.Parent,
+					Meta:     op.Meta.Bytes(),
+					ChildId:  op.Child,
+				},
+			},
+		}
+
+		err = tree.SignMessage(req, s.key)
+		if err != nil {
+			return fmt.Errorf("can't sign apply request: %w", err)
+		}
+
+		err = s.treeService.ReplicateTreeOp(ctx, node, req)
+		if err != nil {
+			return err
+		}
+
+		height = op.Time + 1
+	}
+}
+
+func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) {
+	nm, err := s.netMapSrc.GetNetMap(ctx, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := s.cnrSrc.Get(ctx, contID)
+	if err != nil {
+		return nil, err
+	}
+
+	binCnr := make([]byte, sha256.Size)
+	contID.Encode(binCnr)
+
+	ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
+	if err != nil {
+		return nil, errFailedToBuildListOfContainerNodes
+	}
+
+	nodes := placement.FlattenNodes(ns)
+	bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
+	for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
+		if bytes.Equal(nodes[i].PublicKey(), bs) {
+			copy(nodes[i:], nodes[i+1:])
+			nodes = nodes[:len(nodes)-1]
+		}
+	}
+	return nodes, nil
+}
+
+type replicatorResult struct {
+	count int
+}
+
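The index loop at the end of getContainerNodes deletes the local node from the candidate slice while scanning it, hence the inline warning against range. On Go 1.21 or newer the same self-exclusion could be written with the standard slices package; an equivalent sketch, not what this patch does:

	// Drop our own node from the replication candidates
	// (needs "slices" among the imports).
	nodes = slices.DeleteFunc(nodes, func(n netmap.NodeInfo) bool {
		return bytes.Equal(n.PublicKey(), bs) // bs: this node's public key bytes
	})

+// SubmitSuccessfulReplication implements the replicator.TaskResult interface.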
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) { + r.count++ +} diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go index d9fefc38e..a8ef7809e 100644 --- a/pkg/services/control/server/gc.go +++ b/pkg/services/control/server/gc.go @@ -42,8 +42,7 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques prm.WithForceRemoval() prm.WithAddress(addrList[i]) - _, err := s.s.Delete(ctx, prm) - if err != nil && firstErr == nil { + if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil { firstErr = err } } diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go index 1c038253a..5e0496910 100644 --- a/pkg/services/control/server/get_netmap_status.go +++ b/pkg/services/control/server/get_netmap_status.go @@ -10,12 +10,12 @@ import ( ) // GetNetmapStatus gets node status in FrostFS network. -func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) { +func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } - st, epoch, err := s.nodeState.GetNetmapStatus() + st, epoch, err := s.nodeState.GetNetmapStatus(ctx) if err != nil { return nil, err } diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go index d6531b947..efe2754ea 100644 --- a/pkg/services/control/server/list_shards.go +++ b/pkg/services/control/server/list_shards.go @@ -25,15 +25,15 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) ( info := s.s.DumpInfo() - shardInfos := make([]*control.ShardInfo, 0, len(info.Shards)) + shardInfos := make([]control.ShardInfo, 0, len(info.Shards)) for _, sh := range info.Shards { si := new(control.ShardInfo) - si.SetID(*sh.ID) + si.SetShard_ID(*sh.ID) si.SetMetabasePath(sh.MetaBaseInfo.Path) si.Blobstor = blobstorInfoToProto(sh.BlobStorInfo) - si.SetWriteCachePath(sh.WriteCacheInfo.Path) + si.SetWritecachePath(sh.WriteCacheInfo.Path) si.SetPiloramaPath(sh.PiloramaInfo.Path) var m control.ShardMode @@ -53,8 +53,9 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) ( si.SetMode(m) si.SetErrorCount(sh.ErrorCount) + si.SetEvacuationInProgress(sh.EvacuationInProgress) - shardInfos = append(shardInfos, si) + shardInfos = append(shardInfos, *si) } body.SetShards(shardInfos) @@ -67,10 +68,10 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) ( return resp, nil } -func blobstorInfoToProto(info blobstor.Info) []*control.BlobstorInfo { - res := make([]*control.BlobstorInfo, len(info.SubStorages)) +func blobstorInfoToProto(info blobstor.Info) []control.BlobstorInfo { + res := make([]control.BlobstorInfo, len(info.SubStorages)) for i := range info.SubStorages { - res[i] = &control.BlobstorInfo{ + res[i] = control.BlobstorInfo{ Path: info.SubStorages[i].Path, Type: info.SubStorages[i].Type, } diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go new file mode 100644 index 000000000..39565ed50 --- /dev/null +++ b/pkg/services/control/server/list_shards_for_object.go @@ -0,0 +1,65 @@ +package control + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) { + err := s.isValidRequest(req) + if err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + var obj oid.ID + err = obj.DecodeString(req.GetBody().GetObjectId()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + var cnr cid.ID + err = cnr.DecodeString(req.GetBody().GetContainerId()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + resp := new(control.ListShardsForObjectResponse) + body := new(control.ListShardsForObjectResponse_Body) + resp.SetBody(body) + + var objAddr oid.Address + objAddr.SetContainer(cnr) + objAddr.SetObject(obj) + info, err := s.s.ListShardsForObject(ctx, objAddr) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if len(info) == 0 { + return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject) + } + + body.SetShard_ID(shardInfoToProto(info)) + + // Sign the response + if err := ctrlmessage.Sign(s.key, resp); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return resp, nil +} + +func shardInfoToProto(infos []shard.Info) [][]byte { + shardInfos := make([][]byte, 0, len(infos)) + for _, info := range infos { + shardInfos = append(shardInfos, *info.ID) + } + + return shardInfos +} diff --git a/pkg/services/control/server/policy_engine.go b/pkg/services/control/server/policy_engine.go index 98daac8a6..ab8258e27 100644 --- a/pkg/services/control/server/policy_engine.go +++ b/pkg/services/control/server/policy_engine.go @@ -220,13 +220,13 @@ func (s *Server) ListTargetsLocalOverrides(_ context.Context, req *control.ListT if err != nil { return nil, status.Error(getCodeByLocalStorageErr(err), err.Error()) } - targets := make([]*control.ChainTarget, 0, len(apeTargets)) + targets := make([]control.ChainTarget, 0, len(apeTargets)) for i := range apeTargets { target, err := controlTarget(&apeTargets[i]) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } - targets = append(targets, &target) + targets = append(targets, target) } resp := &control.ListTargetsLocalOverridesResponse{ diff --git a/pkg/services/control/server/rebuild.go b/pkg/services/control/server/rebuild.go new file mode 100644 index 000000000..6ddfb8bf4 --- /dev/null +++ b/pkg/services/control/server/rebuild.go @@ -0,0 +1,59 @@ +package control + +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s *Server) StartShardRebuild(ctx context.Context, req *control.StartShardRebuildRequest) (*control.StartShardRebuildResponse, error) { + err := s.isValidRequest(req) + if err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + if 
req.GetBody().GetConcurrencyLimit() == 0 || req.GetBody().GetConcurrencyLimit() > 10000 { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("concurrency limit must be in range (0; 10 000], current value %d", req.GetBody().GetConcurrencyLimit())) + } + + if req.GetBody().GetTargetFillPercent() == 0 || req.GetBody().GetTargetFillPercent() > 100 { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("fill percent must be in range (0; 100], current value %d", req.GetBody().GetTargetFillPercent())) + } + + prm := engine.RebuildPrm{ + ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()), + ConcurrencyLimit: req.GetBody().GetConcurrencyLimit(), + TargetFillPercent: req.GetBody().GetTargetFillPercent(), + } + + res, err := s.s.Rebuild(ctx, prm) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + resp := &control.StartShardRebuildResponse{Body: &control.StartShardRebuildResponse_Body{}} + for _, r := range res.ShardResults { + if r.Success { + resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{ + Shard_ID: *r.ShardID, + Success: true, + }) + } else { + resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{ + Shard_ID: *r.ShardID, + Error: r.ErrorMsg, + }) + } + } + + err = ctrlmessage.Sign(s.key, resp) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go index e3f8b8caf..6799bdcac 100644 --- a/pkg/services/control/server/seal_writecache.go +++ b/pkg/services/control/server/seal_writecache.go @@ -19,6 +19,9 @@ func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCache prm := engine.SealWriteCachePrm{ ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()), IgnoreErrors: req.GetBody().GetIgnoreErrors(), + Async: req.GetBody().GetAsync(), + RestoreMode: req.GetBody().GetRestoreMode(), + Shrink: req.GetBody().GetShrink(), } res, err := s.s.SealWriteCache(ctx, prm) @@ -29,12 +32,12 @@ func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCache resp := &control.SealWriteCacheResponse{Body: &control.SealWriteCacheResponse_Body{}} for _, r := range res.ShardResults { if r.Success { - resp.Body.Results = append(resp.GetBody().GetResults(), &control.SealWriteCacheResponse_Body_Status{ + resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{ Shard_ID: *r.ShardID, Success: true, }) } else { - resp.Body.Results = append(resp.GetBody().GetResults(), &control.SealWriteCacheResponse_Body_Status{ + resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{ Shard_ID: *r.ShardID, Error: r.ErrorMsg, }) diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go index f3fe56a46..59d701bc6 100644 --- a/pkg/services/control/server/server.go +++ b/pkg/services/control/server/server.go @@ -1,6 +1,7 @@ package control import ( + "context" "crypto/ecdsa" "sync/atomic" @@ -26,13 +27,13 @@ type Server struct { // HealthChecker is component interface for calculating // the current health status of a node. type HealthChecker interface { - // Must calculate and return current status of the node in FrostFS network map. + // NetmapStatus must calculate and return current status of the node in FrostFS network map. 
// // If status can not be calculated for any reason, // control.netmapStatus_STATUS_UNDEFINED should be returned. NetmapStatus() control.NetmapStatus - // Must calculate and return current health status of the node application. + // HealthStatus must calculate and return current health status of the node application. // // If status can not be calculated for any reason, // control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned. @@ -45,13 +46,13 @@ type NodeState interface { // // If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed // in the network settings, the node additionally starts local maintenance. - SetNetmapStatus(st control.NetmapStatus) error + SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error // ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE) // but starts local maintenance regardless of the network settings. - ForceMaintenance() error + ForceMaintenance(ctx context.Context) error - GetNetmapStatus() (control.NetmapStatus, uint64, error) + GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) } // LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go index 3fd69df12..529041dca 100644 --- a/pkg/services/control/server/set_netmap_status.go +++ b/pkg/services/control/server/set_netmap_status.go @@ -12,7 +12,7 @@ import ( // SetNetmapStatus sets node status in FrostFS network. // // If request is unsigned or signed by disallowed key, permission error returns. -func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) { +func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) { // verify request if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) @@ -29,9 +29,9 @@ func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatus "force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE) } - err = s.nodeState.ForceMaintenance() + err = s.nodeState.ForceMaintenance(ctx) } else { - err = s.nodeState.SetNetmapStatus(st) + err = s.nodeState.SetNetmapStatus(ctx, st) } if err != nil { diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go index 52835c41d..4f8796263 100644 --- a/pkg/services/control/server/set_shard_mode.go +++ b/pkg/services/control/server/set_shard_mode.go @@ -11,7 +11,7 @@ import ( "google.golang.org/grpc/status" ) -func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) { +func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) { // verify request err := s.isValidRequest(req) if err != nil { @@ -38,7 +38,7 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques } for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) { - err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter()) + err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/pkg/services/control/server/sign.go b/pkg/services/control/server/sign.go index 514af273f..0e8e24b6e 
100644 --- a/pkg/services/control/server/sign.go +++ b/pkg/services/control/server/sign.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" ) diff --git a/pkg/services/control/service.go b/pkg/services/control/service.go deleted file mode 100644 index ef0c0a8d2..000000000 --- a/pkg/services/control/service.go +++ /dev/null @@ -1,142 +0,0 @@ -package control - -// SetBody sets health check request body. -func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetNetmapStatus sets status of the storage node in FrostFS network map. -func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) { - if x != nil { - x.NetmapStatus = v - } -} - -// SetHealthStatus sets health status of the storage node application. -func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) { - if x != nil { - x.HealthStatus = v - } -} - -// SetBody sets health check response body. -func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) { - if x != nil { - x.Body = v - } -} - -// SetStatus sets new storage node status in FrostFS network map. -func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) { - if x != nil { - x.Status = v - } -} - -// SetForceMaintenance sets force_maintenance flag in the message. -func (x *SetNetmapStatusRequest_Body) SetForceMaintenance() { - x.ForceMaintenance = true -} - -// SetBody sets body of the set netmap status request . -func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets set body of the netmap status response. -func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) { - if x != nil { - x.Body = v - } -} - -// SetAddressList sets list of objects to be removed in FrostFS API binary format. -func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) { - if x != nil { - x.AddressList = v - } -} - -// SetBody sets body of the set "Drop objects" request. -func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets set body of the "Drop objects" response. -func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets list shards request body. -func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetShards sets shards of the storage node. -func (x *ListShardsResponse_Body) SetShards(v []*ShardInfo) { - if x != nil { - x.Shards = v - } -} - -// SetBody sets list shards response body. -func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) { - if x != nil { - x.Body = v - } -} - -// SetShardIDList sets shard ID whose mode is requested to be set. -func (x *SetShardModeRequest_Body) SetShardIDList(v [][]byte) { - if v != nil { - x.Shard_ID = v - } -} - -// SetMode sets mode of the shard. -func (x *SetShardModeRequest_Body) SetMode(v ShardMode) { - x.Mode = v -} - -// ClearErrorCounter sets flag signifying whether error counter for shard should be cleared. -func (x *SetShardModeRequest_Body) ClearErrorCounter(reset bool) { - x.ResetErrorCounter = reset -} - -// SetBody sets request body. 
-func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets body of the set shard mode response. -func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets list shards request body. -func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets list shards response body. -func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) { - if x != nil { - x.Body = v - } -} diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go deleted file mode 100644 index 727dd1218..000000000 --- a/pkg/services/control/service.pb.go +++ /dev/null @@ -1,7062 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.0 -// source: pkg/services/control/service.proto - -package control - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StartShardEvacuationRequest_Body_Scope int32 - -const ( - StartShardEvacuationRequest_Body_NONE StartShardEvacuationRequest_Body_Scope = 0 - StartShardEvacuationRequest_Body_OBJECTS StartShardEvacuationRequest_Body_Scope = 1 - StartShardEvacuationRequest_Body_TREES StartShardEvacuationRequest_Body_Scope = 2 -) - -// Enum value maps for StartShardEvacuationRequest_Body_Scope. -var ( - StartShardEvacuationRequest_Body_Scope_name = map[int32]string{ - 0: "NONE", - 1: "OBJECTS", - 2: "TREES", - } - StartShardEvacuationRequest_Body_Scope_value = map[string]int32{ - "NONE": 0, - "OBJECTS": 1, - "TREES": 2, - } -) - -func (x StartShardEvacuationRequest_Body_Scope) Enum() *StartShardEvacuationRequest_Body_Scope { - p := new(StartShardEvacuationRequest_Body_Scope) - *p = x - return p -} - -func (x StartShardEvacuationRequest_Body_Scope) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (StartShardEvacuationRequest_Body_Scope) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_control_service_proto_enumTypes[0].Descriptor() -} - -func (StartShardEvacuationRequest_Body_Scope) Type() protoreflect.EnumType { - return &file_pkg_services_control_service_proto_enumTypes[0] -} - -func (x StartShardEvacuationRequest_Body_Scope) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use StartShardEvacuationRequest_Body_Scope.Descriptor instead. -func (StartShardEvacuationRequest_Body_Scope) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20, 0, 0} -} - -// Evacuate status enum. -type GetShardEvacuationStatusResponse_Body_Status int32 - -const ( - GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0 - GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1 - GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2 -) - -// Enum value maps for GetShardEvacuationStatusResponse_Body_Status. 
-var ( - GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{ - 0: "EVACUATE_SHARD_STATUS_UNDEFINED", - 1: "RUNNING", - 2: "COMPLETED", - } - GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{ - "EVACUATE_SHARD_STATUS_UNDEFINED": 0, - "RUNNING": 1, - "COMPLETED": 2, - } -) - -func (x GetShardEvacuationStatusResponse_Body_Status) Enum() *GetShardEvacuationStatusResponse_Body_Status { - p := new(GetShardEvacuationStatusResponse_Body_Status) - *p = x - return p -} - -func (x GetShardEvacuationStatusResponse_Body_Status) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GetShardEvacuationStatusResponse_Body_Status) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_control_service_proto_enumTypes[1].Descriptor() -} - -func (GetShardEvacuationStatusResponse_Body_Status) Type() protoreflect.EnumType { - return &file_pkg_services_control_service_proto_enumTypes[1] -} - -func (x GetShardEvacuationStatusResponse_Body_Status) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GetShardEvacuationStatusResponse_Body_Status.Descriptor instead. -func (GetShardEvacuationStatusResponse_Body_Status) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0, 0} -} - -// Health check request. -type HealthCheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of health check request message. - Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *HealthCheckRequest) Reset() { - *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckRequest) ProtoMessage() {} - -func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. -func (*HealthCheckRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0} -} - -func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *HealthCheckRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Health check request. -type HealthCheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of health check response message. - Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *HealthCheckResponse) Reset() { - *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckResponse) ProtoMessage() {} - -func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. -func (*HealthCheckResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1} -} - -func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *HealthCheckResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Set netmap status request. -type SetNetmapStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of set netmap status request message. - Body *SetNetmapStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *SetNetmapStatusRequest) Reset() { - *x = SetNetmapStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetNetmapStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetNetmapStatusRequest) ProtoMessage() {} - -func (x *SetNetmapStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetNetmapStatusRequest.ProtoReflect.Descriptor instead. -func (*SetNetmapStatusRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2} -} - -func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *SetNetmapStatusRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Set netmap status response. -type SetNetmapStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of set netmap status response message. - Body *SetNetmapStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *SetNetmapStatusResponse) Reset() { - *x = SetNetmapStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetNetmapStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetNetmapStatusResponse) ProtoMessage() {} - -func (x *SetNetmapStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetNetmapStatusResponse.ProtoReflect.Descriptor instead. -func (*SetNetmapStatusResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3} -} - -func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *SetNetmapStatusResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Get netmap status request. -type GetNetmapStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of set netmap status request message. - Body *GetNetmapStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetNetmapStatusRequest) Reset() { - *x = GetNetmapStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNetmapStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNetmapStatusRequest) ProtoMessage() {} - -func (x *GetNetmapStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNetmapStatusRequest.ProtoReflect.Descriptor instead. -func (*GetNetmapStatusRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4} -} - -func (x *GetNetmapStatusRequest) GetBody() *GetNetmapStatusRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetNetmapStatusRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Get netmap status response. -type GetNetmapStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of get netmap status response message. - Body *GetNetmapStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetNetmapStatusResponse) Reset() { - *x = GetNetmapStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNetmapStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNetmapStatusResponse) ProtoMessage() {} - -func (x *GetNetmapStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNetmapStatusResponse.ProtoReflect.Descriptor instead. -func (*GetNetmapStatusResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5} -} - -func (x *GetNetmapStatusResponse) GetBody() *GetNetmapStatusResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetNetmapStatusResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Request to drop the objects. -type DropObjectsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of the request message. - Body *DropObjectsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *DropObjectsRequest) Reset() { - *x = DropObjectsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropObjectsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropObjectsRequest) ProtoMessage() {} - -func (x *DropObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropObjectsRequest.ProtoReflect.Descriptor instead. -func (*DropObjectsRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6} -} - -func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *DropObjectsRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Response to request to drop the objects. -type DropObjectsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of the response message. - Body *DropObjectsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *DropObjectsResponse) Reset() { - *x = DropObjectsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropObjectsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropObjectsResponse) ProtoMessage() {} - -func (x *DropObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropObjectsResponse.ProtoReflect.Descriptor instead. -func (*DropObjectsResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7} -} - -func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *DropObjectsResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Request to list all shards of the node. -type ListShardsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of the request message. - Body *ListShardsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *ListShardsRequest) Reset() { - *x = ListShardsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListShardsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListShardsRequest) ProtoMessage() {} - -func (x *ListShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListShardsRequest.ProtoReflect.Descriptor instead. -func (*ListShardsRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8} -} - -func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *ListShardsRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// ListShards response. -type ListShardsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of the response message. - Body *ListShardsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *ListShardsResponse) Reset() { - *x = ListShardsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListShardsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListShardsResponse) ProtoMessage() {} - -func (x *ListShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListShardsResponse.ProtoReflect.Descriptor instead. -func (*ListShardsResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9} -} - -func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *ListShardsResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Request to set mode of the shard. -type SetShardModeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of set shard mode request message. - Body *SetShardModeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *SetShardModeRequest) Reset() { - *x = SetShardModeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetShardModeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetShardModeRequest) ProtoMessage() {} - -func (x *SetShardModeRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetShardModeRequest.ProtoReflect.Descriptor instead. -func (*SetShardModeRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10} -} - -func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *SetShardModeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// SetShardMode response. -type SetShardModeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of set shard mode response message. - Body *SetShardModeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *SetShardModeResponse) Reset() { - *x = SetShardModeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetShardModeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetShardModeResponse) ProtoMessage() {} - -func (x *SetShardModeResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetShardModeResponse.ProtoReflect.Descriptor instead. -func (*SetShardModeResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11} -} - -func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *SetShardModeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// SynchronizeTree request. -type SynchronizeTreeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of restore shard request message. - Body *SynchronizeTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *SynchronizeTreeRequest) Reset() { - *x = SynchronizeTreeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SynchronizeTreeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SynchronizeTreeRequest) ProtoMessage() {} - -func (x *SynchronizeTreeRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SynchronizeTreeRequest.ProtoReflect.Descriptor instead. -func (*SynchronizeTreeRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12} -} - -func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *SynchronizeTreeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// SynchronizeTree response. -type SynchronizeTreeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Body of restore shard response message. - Body *SynchronizeTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Body signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *SynchronizeTreeResponse) Reset() { - *x = SynchronizeTreeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SynchronizeTreeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SynchronizeTreeResponse) ProtoMessage() {} - -func (x *SynchronizeTreeResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SynchronizeTreeResponse.ProtoReflect.Descriptor instead. -func (*SynchronizeTreeResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13} -} - -func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *SynchronizeTreeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// EvacuateShard request. -type EvacuateShardRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *EvacuateShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *EvacuateShardRequest) Reset() { - *x = EvacuateShardRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EvacuateShardRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EvacuateShardRequest) ProtoMessage() {} - -func (x *EvacuateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EvacuateShardRequest.ProtoReflect.Descriptor instead. -func (*EvacuateShardRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14} -} - -func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *EvacuateShardRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// EvacuateShard response. 
-type EvacuateShardResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *EvacuateShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *EvacuateShardResponse) Reset() { - *x = EvacuateShardResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EvacuateShardResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EvacuateShardResponse) ProtoMessage() {} - -func (x *EvacuateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EvacuateShardResponse.ProtoReflect.Descriptor instead. -func (*EvacuateShardResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15} -} - -func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *EvacuateShardResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// FlushCache request. -type FlushCacheRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *FlushCacheRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *FlushCacheRequest) Reset() { - *x = FlushCacheRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FlushCacheRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FlushCacheRequest) ProtoMessage() {} - -func (x *FlushCacheRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FlushCacheRequest.ProtoReflect.Descriptor instead. -func (*FlushCacheRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16} -} - -func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *FlushCacheRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// FlushCache response. 
-type FlushCacheResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *FlushCacheResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *FlushCacheResponse) Reset() { - *x = FlushCacheResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FlushCacheResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FlushCacheResponse) ProtoMessage() {} - -func (x *FlushCacheResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FlushCacheResponse.ProtoReflect.Descriptor instead. -func (*FlushCacheResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17} -} - -func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *FlushCacheResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Doctor request. -type DoctorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *DoctorRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *DoctorRequest) Reset() { - *x = DoctorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DoctorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DoctorRequest) ProtoMessage() {} - -func (x *DoctorRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DoctorRequest.ProtoReflect.Descriptor instead. -func (*DoctorRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18} -} - -func (x *DoctorRequest) GetBody() *DoctorRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *DoctorRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// Doctor response. 
-type DoctorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *DoctorResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *DoctorResponse) Reset() { - *x = DoctorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DoctorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DoctorResponse) ProtoMessage() {} - -func (x *DoctorResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DoctorResponse.ProtoReflect.Descriptor instead. -func (*DoctorResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19} -} - -func (x *DoctorResponse) GetBody() *DoctorResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *DoctorResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// StartShardEvacuation request. -type StartShardEvacuationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *StartShardEvacuationRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *StartShardEvacuationRequest) Reset() { - *x = StartShardEvacuationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartShardEvacuationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartShardEvacuationRequest) ProtoMessage() {} - -func (x *StartShardEvacuationRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartShardEvacuationRequest.ProtoReflect.Descriptor instead. -func (*StartShardEvacuationRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20} -} - -func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *StartShardEvacuationRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// StartShardEvacuation response. 
-type StartShardEvacuationResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *StartShardEvacuationResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *StartShardEvacuationResponse) Reset() { - *x = StartShardEvacuationResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartShardEvacuationResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartShardEvacuationResponse) ProtoMessage() {} - -func (x *StartShardEvacuationResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartShardEvacuationResponse.ProtoReflect.Descriptor instead. -func (*StartShardEvacuationResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21} -} - -func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *StartShardEvacuationResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// GetShardEvacuationStatus request. -type GetShardEvacuationStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Body *GetShardEvacuationStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetShardEvacuationStatusRequest) Reset() { - *x = GetShardEvacuationStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetShardEvacuationStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetShardEvacuationStatusRequest) ProtoMessage() {} - -func (x *GetShardEvacuationStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetShardEvacuationStatusRequest.ProtoReflect.Descriptor instead. -func (*GetShardEvacuationStatusRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{22} -} - -func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -// GetShardEvacuationStatus response. 
-type GetShardEvacuationStatusResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *GetShardEvacuationStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetShardEvacuationStatusResponse) Reset() {
-	*x = GetShardEvacuationStatusResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[23]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GetShardEvacuationStatusResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusResponse) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[23]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusResponse.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23}
-}
-
-func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// ResetShardEvacuationStatus request.
-type ResetShardEvacuationStatusRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *ResetShardEvacuationStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ResetShardEvacuationStatusRequest) Reset() {
-	*x = ResetShardEvacuationStatusRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[24]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ResetShardEvacuationStatusRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResetShardEvacuationStatusRequest) ProtoMessage() {}
-
-func (x *ResetShardEvacuationStatusRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[24]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResetShardEvacuationStatusRequest.ProtoReflect.Descriptor instead.
-func (*ResetShardEvacuationStatusRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{24}
-}
-
-func (x *ResetShardEvacuationStatusRequest) GetBody() *ResetShardEvacuationStatusRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *ResetShardEvacuationStatusRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// ResetShardEvacuationStatus response.
-type ResetShardEvacuationStatusResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *ResetShardEvacuationStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ResetShardEvacuationStatusResponse) Reset() {
-	*x = ResetShardEvacuationStatusResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[25]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ResetShardEvacuationStatusResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResetShardEvacuationStatusResponse) ProtoMessage() {}
-
-func (x *ResetShardEvacuationStatusResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[25]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResetShardEvacuationStatusResponse.ProtoReflect.Descriptor instead.
-func (*ResetShardEvacuationStatusResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{25}
-}
-
-func (x *ResetShardEvacuationStatusResponse) GetBody() *ResetShardEvacuationStatusResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *ResetShardEvacuationStatusResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// StopShardEvacuation request.
-type StopShardEvacuationRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *StopShardEvacuationRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *StopShardEvacuationRequest) Reset() {
-	*x = StopShardEvacuationRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[26]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *StopShardEvacuationRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StopShardEvacuationRequest) ProtoMessage() {}
-
-func (x *StopShardEvacuationRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[26]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use StopShardEvacuationRequest.ProtoReflect.Descriptor instead.
-func (*StopShardEvacuationRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{26}
-}
-
-func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *StopShardEvacuationRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// StopShardEvacuation response.
-type StopShardEvacuationResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *StopShardEvacuationResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *StopShardEvacuationResponse) Reset() {
-	*x = StopShardEvacuationResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[27]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *StopShardEvacuationResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StopShardEvacuationResponse) ProtoMessage() {}
-
-func (x *StopShardEvacuationResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[27]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use StopShardEvacuationResponse.ProtoReflect.Descriptor instead.
-func (*StopShardEvacuationResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{27}
-}
-
-func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *StopShardEvacuationResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// AddChainLocalOverride request.
-type AddChainLocalOverrideRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *AddChainLocalOverrideRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddChainLocalOverrideRequest) Reset() {
-	*x = AddChainLocalOverrideRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[28]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *AddChainLocalOverrideRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddChainLocalOverrideRequest) ProtoMessage() {}
-
-func (x *AddChainLocalOverrideRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[28]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddChainLocalOverrideRequest.ProtoReflect.Descriptor instead.
-func (*AddChainLocalOverrideRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{28}
-}
-
-func (x *AddChainLocalOverrideRequest) GetBody() *AddChainLocalOverrideRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *AddChainLocalOverrideRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// AddChainLocalOverride response.
-type AddChainLocalOverrideResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *AddChainLocalOverrideResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddChainLocalOverrideResponse) Reset() {
-	*x = AddChainLocalOverrideResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[29]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *AddChainLocalOverrideResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddChainLocalOverrideResponse) ProtoMessage() {}
-
-func (x *AddChainLocalOverrideResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[29]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddChainLocalOverrideResponse.ProtoReflect.Descriptor instead.
-func (*AddChainLocalOverrideResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{29}
-}
-
-func (x *AddChainLocalOverrideResponse) GetBody() *AddChainLocalOverrideResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *AddChainLocalOverrideResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// GetChainLocalOverride request.
-type GetChainLocalOverrideRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *GetChainLocalOverrideRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetChainLocalOverrideRequest) Reset() {
-	*x = GetChainLocalOverrideRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[30]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GetChainLocalOverrideRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetChainLocalOverrideRequest) ProtoMessage() {}
-
-func (x *GetChainLocalOverrideRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[30]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetChainLocalOverrideRequest.ProtoReflect.Descriptor instead.
-func (*GetChainLocalOverrideRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{30}
-}
-
-func (x *GetChainLocalOverrideRequest) GetBody() *GetChainLocalOverrideRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *GetChainLocalOverrideRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// GetChainLocalOverride response.
-type GetChainLocalOverrideResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *GetChainLocalOverrideResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetChainLocalOverrideResponse) Reset() {
-	*x = GetChainLocalOverrideResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[31]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GetChainLocalOverrideResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetChainLocalOverrideResponse) ProtoMessage() {}
-
-func (x *GetChainLocalOverrideResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[31]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetChainLocalOverrideResponse.ProtoReflect.Descriptor instead.
-func (*GetChainLocalOverrideResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{31}
-}
-
-func (x *GetChainLocalOverrideResponse) GetBody() *GetChainLocalOverrideResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *GetChainLocalOverrideResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// ListChainLocalOverrides request.
-type ListChainLocalOverridesRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *ListChainLocalOverridesRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListChainLocalOverridesRequest) Reset() {
-	*x = ListChainLocalOverridesRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[32]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ListChainLocalOverridesRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListChainLocalOverridesRequest) ProtoMessage() {}
-
-func (x *ListChainLocalOverridesRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[32]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListChainLocalOverridesRequest.ProtoReflect.Descriptor instead.
-func (*ListChainLocalOverridesRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{32}
-}
-
-func (x *ListChainLocalOverridesRequest) GetBody() *ListChainLocalOverridesRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *ListChainLocalOverridesRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// ListChainLocalOverrides response.
-type ListChainLocalOverridesResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *ListChainLocalOverridesResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListChainLocalOverridesResponse) Reset() {
-	*x = ListChainLocalOverridesResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[33]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ListChainLocalOverridesResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListChainLocalOverridesResponse) ProtoMessage() {}
-
-func (x *ListChainLocalOverridesResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[33]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListChainLocalOverridesResponse.ProtoReflect.Descriptor instead.
-func (*ListChainLocalOverridesResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{33}
-}
-
-func (x *ListChainLocalOverridesResponse) GetBody() *ListChainLocalOverridesResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *ListChainLocalOverridesResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// ListTargetsLocalOverrides request.
-type ListTargetsLocalOverridesRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *ListTargetsLocalOverridesRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListTargetsLocalOverridesRequest) Reset() {
-	*x = ListTargetsLocalOverridesRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[34]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ListTargetsLocalOverridesRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTargetsLocalOverridesRequest) ProtoMessage() {}
-
-func (x *ListTargetsLocalOverridesRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[34]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTargetsLocalOverridesRequest.ProtoReflect.Descriptor instead.
-func (*ListTargetsLocalOverridesRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{34}
-}
-
-func (x *ListTargetsLocalOverridesRequest) GetBody() *ListTargetsLocalOverridesRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *ListTargetsLocalOverridesRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// ListTargetsLocalOverrides response.
-type ListTargetsLocalOverridesResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *ListTargetsLocalOverridesResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListTargetsLocalOverridesResponse) Reset() {
-	*x = ListTargetsLocalOverridesResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[35]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ListTargetsLocalOverridesResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTargetsLocalOverridesResponse) ProtoMessage() {}
-
-func (x *ListTargetsLocalOverridesResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[35]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTargetsLocalOverridesResponse.ProtoReflect.Descriptor instead.
-func (*ListTargetsLocalOverridesResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{35}
-}
-
-func (x *ListTargetsLocalOverridesResponse) GetBody() *ListTargetsLocalOverridesResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *ListTargetsLocalOverridesResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type RemoveChainLocalOverrideRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *RemoveChainLocalOverrideRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverrideRequest) Reset() {
-	*x = RemoveChainLocalOverrideRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[36]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *RemoveChainLocalOverrideRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverrideRequest) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverrideRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[36]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverrideRequest.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverrideRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{36}
-}
-
-func (x *RemoveChainLocalOverrideRequest) GetBody() *RemoveChainLocalOverrideRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *RemoveChainLocalOverrideRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type RemoveChainLocalOverrideResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *RemoveChainLocalOverrideResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverrideResponse) Reset() {
-	*x = RemoveChainLocalOverrideResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[37]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *RemoveChainLocalOverrideResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverrideResponse) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverrideResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[37]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverrideResponse.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverrideResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{37}
-}
-
-func (x *RemoveChainLocalOverrideResponse) GetBody() *RemoveChainLocalOverrideResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *RemoveChainLocalOverrideResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type RemoveChainLocalOverridesByTargetRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *RemoveChainLocalOverridesByTargetRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) Reset() {
-	*x = RemoveChainLocalOverridesByTargetRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[38]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverridesByTargetRequest) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[38]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverridesByTargetRequest.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverridesByTargetRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{38}
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) GetBody() *RemoveChainLocalOverridesByTargetRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type RemoveChainLocalOverridesByTargetResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *RemoveChainLocalOverridesByTargetResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) Reset() {
-	*x = RemoveChainLocalOverridesByTargetResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[39]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverridesByTargetResponse) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[39]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverridesByTargetResponse.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverridesByTargetResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{39}
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) GetBody() *RemoveChainLocalOverridesByTargetResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type SealWriteCacheRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *SealWriteCacheRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SealWriteCacheRequest) Reset() {
-	*x = SealWriteCacheRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[40]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SealWriteCacheRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SealWriteCacheRequest) ProtoMessage() {}
-
-func (x *SealWriteCacheRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[40]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SealWriteCacheRequest.ProtoReflect.Descriptor instead.
-func (*SealWriteCacheRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{40}
-}
-
-func (x *SealWriteCacheRequest) GetBody() *SealWriteCacheRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *SealWriteCacheRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type SealWriteCacheResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *SealWriteCacheResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SealWriteCacheResponse) Reset() {
-	*x = SealWriteCacheResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[41]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SealWriteCacheResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SealWriteCacheResponse) ProtoMessage() {}
-
-func (x *SealWriteCacheResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[41]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SealWriteCacheResponse.ProtoReflect.Descriptor instead.
-func (*SealWriteCacheResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{41}
-}
-
-func (x *SealWriteCacheResponse) GetBody() *SealWriteCacheResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *SealWriteCacheResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type DetachShardsRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *DetachShardsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DetachShardsRequest) Reset() {
-	*x = DetachShardsRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[42]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *DetachShardsRequest) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DetachShardsRequest) ProtoMessage() {}
-
-func (x *DetachShardsRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[42]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use DetachShardsRequest.ProtoReflect.Descriptor instead.
-func (*DetachShardsRequest) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{42}
-}
-
-func (x *DetachShardsRequest) GetBody() *DetachShardsRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *DetachShardsRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-type DetachShardsResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Body *DetachShardsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
-	Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DetachShardsResponse) Reset() {
-	*x = DetachShardsResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[43]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *DetachShardsResponse) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DetachShardsResponse) ProtoMessage() {}
-
-func (x *DetachShardsResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[43]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use DetachShardsResponse.ProtoReflect.Descriptor instead.
-func (*DetachShardsResponse) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{43}
-}
-
-func (x *DetachShardsResponse) GetBody() *DetachShardsResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-
-func (x *DetachShardsResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-
-// Health check request body.
-type HealthCheckRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthCheckRequest_Body) Reset() {
-	*x = HealthCheckRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[44]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *HealthCheckRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest_Body) ProtoMessage() {}
-
-func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[44]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Health check response body
-type HealthCheckResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Status of the storage node in FrostFS network map.
-	NetmapStatus NetmapStatus `protobuf:"varint,1,opt,name=netmap_status,json=netmapStatus,proto3,enum=control.NetmapStatus" json:"netmap_status,omitempty"`
-	// Health status of storage node application.
-	HealthStatus HealthStatus `protobuf:"varint,2,opt,name=health_status,json=healthStatus,proto3,enum=control.HealthStatus" json:"health_status,omitempty"`
-}
-
-func (x *HealthCheckResponse_Body) Reset() {
-	*x = HealthCheckResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[45]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *HealthCheckResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse_Body) ProtoMessage() {}
-
-func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[45]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus {
-	if x != nil {
-		return x.NetmapStatus
-	}
-	return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
-	if x != nil {
-		return x.HealthStatus
-	}
-	return HealthStatus_HEALTH_STATUS_UNDEFINED
-}
-
-// Set netmap status request body.
-type SetNetmapStatusRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// New storage node status in FrostFS network map.
-	// If status is MAINTENANCE, the node checks whether maintenance is
-	// allowed in the network settings. In case of prohibition, the request
-	// is denied. Otherwise, node switches to local maintenance state. To
-	// force local maintenance, use `force_maintenance` flag.
-	Status NetmapStatus `protobuf:"varint,1,opt,name=status,proto3,enum=control.NetmapStatus" json:"status,omitempty"`
-	// MAINTENANCE status validation skip flag. If set, node starts local
-	// maintenance regardless of network settings. The flag MUST NOT be
-	// set for any other status.
-	ForceMaintenance bool `protobuf:"varint,2,opt,name=force_maintenance,json=forceMaintenance,proto3" json:"force_maintenance,omitempty"`
-}
-
-func (x *SetNetmapStatusRequest_Body) Reset() {
-	*x = SetNetmapStatusRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[46]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SetNetmapStatusRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusRequest_Body) ProtoMessage() {}
-
-func (x *SetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[46]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2, 0}
-}
-
-func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus {
-	if x != nil {
-		return x.Status
-	}
-	return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool {
-	if x != nil {
-		return x.ForceMaintenance
-	}
-	return false
-}
-
-// Set netmap status response body
-type SetNetmapStatusResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *SetNetmapStatusResponse_Body) Reset() {
-	*x = SetNetmapStatusResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[47]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SetNetmapStatusResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusResponse_Body) ProtoMessage() {}
-
-func (x *SetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[47]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusResponse_Body.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3, 0}
-}
-
-type GetNetmapStatusRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetNetmapStatusRequest_Body) Reset() {
-	*x = GetNetmapStatusRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[48]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GetNetmapStatusRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNetmapStatusRequest_Body) ProtoMessage() {}
-
-func (x *GetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[48]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNetmapStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetNetmapStatusRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4, 0}
-}
-
-type GetNetmapStatusResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Storage node status in FrostFS network map.
-	Status NetmapStatus `protobuf:"varint,1,opt,name=status,proto3,enum=control.NetmapStatus" json:"status,omitempty"`
-	// Network map epoch.
-	Epoch uint64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"`
-}
-
-func (x *GetNetmapStatusResponse_Body) Reset() {
-	*x = GetNetmapStatusResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[49]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GetNetmapStatusResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNetmapStatusResponse_Body) ProtoMessage() {}
-
-func (x *GetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[49]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNetmapStatusResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetNetmapStatusResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5, 0}
-}
-
-func (x *GetNetmapStatusResponse_Body) GetStatus() NetmapStatus {
-	if x != nil {
-		return x.Status
-	}
-	return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *GetNetmapStatusResponse_Body) GetEpoch() uint64 {
-	if x != nil {
-		return x.Epoch
-	}
-	return 0
-}
-
-// Request body structure.
-type DropObjectsRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// List of object addresses to be removed.
-	// in FrostFS API binary format.
-	AddressList [][]byte `protobuf:"bytes,1,rep,name=address_list,json=addressList,proto3" json:"address_list,omitempty"`
-}
-
-func (x *DropObjectsRequest_Body) Reset() {
-	*x = DropObjectsRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[50]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *DropObjectsRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsRequest_Body) ProtoMessage() {}
-
-func (x *DropObjectsRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[50]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsRequest_Body.ProtoReflect.Descriptor instead.
-func (*DropObjectsRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6, 0}
-}
-
-func (x *DropObjectsRequest_Body) GetAddressList() [][]byte {
-	if x != nil {
-		return x.AddressList
-	}
-	return nil
-}
-
-// Response body structure.
-type DropObjectsResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *DropObjectsResponse_Body) Reset() {
-	*x = DropObjectsResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[51]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *DropObjectsResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsResponse_Body) ProtoMessage() {}
-
-func (x *DropObjectsResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[51]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsResponse_Body.ProtoReflect.Descriptor instead.
-func (*DropObjectsResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7, 0}
-}
-
-// Request body structure.
-type ListShardsRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *ListShardsRequest_Body) Reset() {
-	*x = ListShardsRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[52]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ListShardsRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsRequest_Body) ProtoMessage() {}
-
-func (x *ListShardsRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[52]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsRequest_Body.ProtoReflect.Descriptor instead.
-func (*ListShardsRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8, 0}
-}
-
-// Response body structure.
-type ListShardsResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// List of the node's shards.
-	Shards []*ShardInfo `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"`
-}
-
-func (x *ListShardsResponse_Body) Reset() {
-	*x = ListShardsResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[53]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ListShardsResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsResponse_Body) ProtoMessage() {}
-
-func (x *ListShardsResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[53]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsResponse_Body.ProtoReflect.Descriptor instead.
-func (*ListShardsResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9, 0}
-}
-
-func (x *ListShardsResponse_Body) GetShards() []*ShardInfo {
-	if x != nil {
-		return x.Shards
-	}
-	return nil
-}
-
-// Request body structure.
-type SetShardModeRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// ID of the shard.
-	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-	// Mode that requested to be set.
-	Mode ShardMode `protobuf:"varint,2,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
-	// Flag signifying whether error counter should be set to 0.
-	ResetErrorCounter bool `protobuf:"varint,3,opt,name=resetErrorCounter,proto3" json:"resetErrorCounter,omitempty"`
-}
-
-func (x *SetShardModeRequest_Body) Reset() {
-	*x = SetShardModeRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[54]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SetShardModeRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeRequest_Body) ProtoMessage() {}
-
-func (x *SetShardModeRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[54]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeRequest_Body.ProtoReflect.Descriptor instead.
-func (*SetShardModeRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10, 0}
-}
-
-func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
-	if x != nil {
-		return x.Shard_ID
-	}
-	return nil
-}
-
-func (x *SetShardModeRequest_Body) GetMode() ShardMode {
-	if x != nil {
-		return x.Mode
-	}
-	return ShardMode_SHARD_MODE_UNDEFINED
-}
-
-func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool {
-	if x != nil {
-		return x.ResetErrorCounter
-	}
-	return false
-}
-
-// Response body structure.
-type SetShardModeResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *SetShardModeResponse_Body) Reset() {
-	*x = SetShardModeResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[55]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SetShardModeResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeResponse_Body) ProtoMessage() {}
-
-func (x *SetShardModeResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[55]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeResponse_Body.ProtoReflect.Descriptor instead.
-func (*SetShardModeResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11, 0}
-}
-
-// Request body structure.
-type SynchronizeTreeRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
-	// Starting height for the synchronization. Can be omitted.
-	Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
-}
-
-func (x *SynchronizeTreeRequest_Body) Reset() {
-	*x = SynchronizeTreeRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[56]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SynchronizeTreeRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeRequest_Body) ProtoMessage() {}
-
-func (x *SynchronizeTreeRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[56]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeRequest_Body.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12, 0}
-}
-
-func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
-	if x != nil {
-		return x.ContainerId
-	}
-	return nil
-}
-
-func (x *SynchronizeTreeRequest_Body) GetTreeId() string {
-	if x != nil {
-		return x.TreeId
-	}
-	return ""
-}
-
-func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 {
-	if x != nil {
-		return x.Height
-	}
-	return 0
-}
-
-// Response body structure.
-type SynchronizeTreeResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *SynchronizeTreeResponse_Body) Reset() {
-	*x = SynchronizeTreeResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[57]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SynchronizeTreeResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeResponse_Body) ProtoMessage() {}
-
-func (x *SynchronizeTreeResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[57]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeResponse_Body.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13, 0}
-}
-
-// Request body structure.
-type EvacuateShardRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// ID of the shard.
-	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-	// Flag indicating whether object read errors should be ignored.
-	IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-}
-
-func (x *EvacuateShardRequest_Body) Reset() {
-	*x = EvacuateShardRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[58]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *EvacuateShardRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardRequest_Body) ProtoMessage() {}
-
-func (x *EvacuateShardRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[58]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardRequest_Body.ProtoReflect.Descriptor instead.
-func (*EvacuateShardRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14, 0}
-}
-
-func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
-	if x != nil {
-		return x.Shard_ID
-	}
-	return nil
-}
-
-func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool {
-	if x != nil {
-		return x.IgnoreErrors
-	}
-	return false
-}
-
-// Response body structure.
-type EvacuateShardResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
-}
-
-func (x *EvacuateShardResponse_Body) Reset() {
-	*x = EvacuateShardResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[59]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *EvacuateShardResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardResponse_Body) ProtoMessage() {}
-
-func (x *EvacuateShardResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[59]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardResponse_Body.ProtoReflect.Descriptor instead.
-func (*EvacuateShardResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15, 0}
-}
-
-func (x *EvacuateShardResponse_Body) GetCount() uint32 {
-	if x != nil {
-		return x.Count
-	}
-	return 0
-}
-
-// Request body structure.
-type FlushCacheRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// ID of the shard.
-	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-	// If true, then writecache will be left in read-only mode after flush
-	// completed.
-	Seal bool `protobuf:"varint,2,opt,name=seal,proto3" json:"seal,omitempty"`
-}
-
-func (x *FlushCacheRequest_Body) Reset() {
-	*x = FlushCacheRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[60]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *FlushCacheRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheRequest_Body) ProtoMessage() {}
-
-func (x *FlushCacheRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[60]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheRequest_Body.ProtoReflect.Descriptor instead.
-func (*FlushCacheRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16, 0}
-}
-
-func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
-	if x != nil {
-		return x.Shard_ID
-	}
-	return nil
-}
-
-func (x *FlushCacheRequest_Body) GetSeal() bool {
-	if x != nil {
-		return x.Seal
-	}
-	return false
-}
-
-// Response body structure.
-type FlushCacheResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *FlushCacheResponse_Body) Reset() {
-	*x = FlushCacheResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[61]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *FlushCacheResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheResponse_Body) ProtoMessage() {}
-
-func (x *FlushCacheResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[61]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheResponse_Body.ProtoReflect.Descriptor instead.
-func (*FlushCacheResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17, 0}
-}
-
-// Request body structure.
-type DoctorRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Number of threads to use for the operation.
-	Concurrency uint32 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
-	// Flag to search engine for duplicate objects and leave only one copy.
-	RemoveDuplicates bool `protobuf:"varint,2,opt,name=remove_duplicates,json=removeDuplicates,proto3" json:"remove_duplicates,omitempty"`
-}
-
-func (x *DoctorRequest_Body) Reset() {
-	*x = DoctorRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[62]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *DoctorRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DoctorRequest_Body) ProtoMessage() {}
-
-func (x *DoctorRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[62]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use DoctorRequest_Body.ProtoReflect.Descriptor instead.
-func (*DoctorRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18, 0}
-}
-
-func (x *DoctorRequest_Body) GetConcurrency() uint32 {
-	if x != nil {
-		return x.Concurrency
-	}
-	return 0
-}
-
-func (x *DoctorRequest_Body) GetRemoveDuplicates() bool {
-	if x != nil {
-		return x.RemoveDuplicates
-	}
-	return false
-}
-
-// Response body structure.
-type DoctorResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *DoctorResponse_Body) Reset() {
-	*x = DoctorResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[63]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *DoctorResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DoctorResponse_Body) ProtoMessage() {}
-
-func (x *DoctorResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[63]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use DoctorResponse_Body.ProtoReflect.Descriptor instead.
-func (*DoctorResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19, 0}
-}
-
-// Request body structure.
-type StartShardEvacuationRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// IDs of the shards.
-	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-	// Flag indicating whether object read errors should be ignored.
-	IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-	// Evacuation scope.
-	Scope uint32 `protobuf:"varint,3,opt,name=scope,proto3" json:"scope,omitempty"`
-}
-
-func (x *StartShardEvacuationRequest_Body) Reset() {
-	*x = StartShardEvacuationRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[64]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *StartShardEvacuationRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartShardEvacuationRequest_Body) ProtoMessage() {}
-
-func (x *StartShardEvacuationRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[64]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartShardEvacuationRequest_Body.ProtoReflect.Descriptor instead.
-func (*StartShardEvacuationRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20, 0}
-}
-
-func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte {
-	if x != nil {
-		return x.Shard_ID
-	}
-	return nil
-}
-
-func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool {
-	if x != nil {
-		return x.IgnoreErrors
-	}
-	return false
-}
-
-func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
-	if x != nil {
-		return x.Scope
-	}
-	return 0
-}
-
-// Response body structure.
-type StartShardEvacuationResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *StartShardEvacuationResponse_Body) Reset() {
-	*x = StartShardEvacuationResponse_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[65]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *StartShardEvacuationResponse_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartShardEvacuationResponse_Body) ProtoMessage() {}
-
-func (x *StartShardEvacuationResponse_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[65]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartShardEvacuationResponse_Body.ProtoReflect.Descriptor instead.
-func (*StartShardEvacuationResponse_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0}
-}
-
-// Request body structure.
-type GetShardEvacuationStatusRequest_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetShardEvacuationStatusRequest_Body) Reset() {
-	*x = GetShardEvacuationStatusRequest_Body{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_pkg_services_control_service_proto_msgTypes[66]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GetShardEvacuationStatusRequest_Body) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusRequest_Body) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusRequest_Body) ProtoReflect() protoreflect.Message {
-	mi := &file_pkg_services_control_service_proto_msgTypes[66]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusRequest_Body) Descriptor() ([]byte, []int) {
-	return file_pkg_services_control_service_proto_rawDescGZIP(), []int{22, 0}
-}
-
-// Response body structure.
-type GetShardEvacuationStatusResponse_Body struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Total objects to evacuate count. The value is approximate, so evacuated +
-	// failed + skipped == total is not guaranteed after completion.
-	TotalObjects uint64 `protobuf:"varint,1,opt,name=total_objects,json=totalObjects,proto3" json:"total_objects,omitempty"`
-	// Evacuated objects count.
-	EvacuatedObjects uint64 `protobuf:"varint,2,opt,name=evacuated_objects,json=evacuatedObjects,proto3" json:"evacuated_objects,omitempty"`
-	// Failed objects count.
-	FailedObjects uint64 `protobuf:"varint,3,opt,name=failed_objects,json=failedObjects,proto3" json:"failed_objects,omitempty"`
-	// Shard IDs.
-	Shard_ID [][]byte `protobuf:"bytes,4,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-	// Evacuation process status.
-	Status GetShardEvacuationStatusResponse_Body_Status `protobuf:"varint,5,opt,name=status,proto3,enum=control.GetShardEvacuationStatusResponse_Body_Status" json:"status,omitempty"`
-	// Evacuation process duration.
-	Duration *GetShardEvacuationStatusResponse_Body_Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"`
-	// Evacuation process started at timestamp.
-	StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"`
-	// Error message if evacuation failed.
-	ErrorMessage string `protobuf:"bytes,8,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-	// Skipped objects count.
-	SkippedObjects uint64 `protobuf:"varint,9,opt,name=skipped_objects,json=skippedObjects,proto3" json:"skipped_objects,omitempty"`
-	// Total trees to evacuate count.
-	TotalTrees uint64 `protobuf:"varint,10,opt,name=total_trees,json=totalTrees,proto3" json:"total_trees,omitempty"`
-	// Evacuated trees count.
-	EvacuatedTrees uint64 `protobuf:"varint,11,opt,name=evacuated_trees,json=evacuatedTrees,proto3" json:"evacuated_trees,omitempty"`
-	// Failed trees count.
- FailedTrees uint64 `protobuf:"varint,12,opt,name=failed_trees,json=failedTrees,proto3" json:"failed_trees,omitempty"` -} - -func (x *GetShardEvacuationStatusResponse_Body) Reset() { - *x = GetShardEvacuationStatusResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetShardEvacuationStatusResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetShardEvacuationStatusResponse_Body) ProtoMessage() {} - -func (x *GetShardEvacuationStatusResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetShardEvacuationStatusResponse_Body.ProtoReflect.Descriptor instead. -func (*GetShardEvacuationStatusResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0} -} - -func (x *GetShardEvacuationStatusResponse_Body) GetTotalObjects() uint64 { - if x != nil { - return x.TotalObjects - } - return 0 -} - -func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedObjects() uint64 { - if x != nil { - return x.EvacuatedObjects - } - return 0 -} - -func (x *GetShardEvacuationStatusResponse_Body) GetFailedObjects() uint64 { - if x != nil { - return x.FailedObjects - } - return 0 -} - -func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} - -func (x *GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status { - if x != nil { - return x.Status - } - return GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED -} - -func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration { - if x != nil { - return x.Duration - } - return nil -} - -func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp { - if x != nil { - return x.StartedAt - } - return nil -} - -func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -func (x *GetShardEvacuationStatusResponse_Body) GetSkippedObjects() uint64 { - if x != nil { - return x.SkippedObjects - } - return 0 -} - -func (x *GetShardEvacuationStatusResponse_Body) GetTotalTrees() uint64 { - if x != nil { - return x.TotalTrees - } - return 0 -} - -func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedTrees() uint64 { - if x != nil { - return x.EvacuatedTrees - } - return 0 -} - -func (x *GetShardEvacuationStatusResponse_Body) GetFailedTrees() uint64 { - if x != nil { - return x.FailedTrees - } - return 0 -} - -// Unix timestamp value. 
-type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) Reset() { - *x = GetShardEvacuationStatusResponse_Body_UnixTimestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetShardEvacuationStatusResponse_Body_UnixTimestamp) ProtoMessage() {} - -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetShardEvacuationStatusResponse_Body_UnixTimestamp.ProtoReflect.Descriptor instead. -func (*GetShardEvacuationStatusResponse_Body_UnixTimestamp) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0, 0} -} - -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 { - if x != nil { - return x.Value - } - return 0 -} - -// Duration in seconds. -type GetShardEvacuationStatusResponse_Body_Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` -} - -func (x *GetShardEvacuationStatusResponse_Body_Duration) Reset() { - *x = GetShardEvacuationStatusResponse_Body_Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetShardEvacuationStatusResponse_Body_Duration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetShardEvacuationStatusResponse_Body_Duration) ProtoMessage() {} - -func (x *GetShardEvacuationStatusResponse_Body_Duration) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetShardEvacuationStatusResponse_Body_Duration.ProtoReflect.Descriptor instead. 
-func (*GetShardEvacuationStatusResponse_Body_Duration) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0, 1} -} - -func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} - -type ResetShardEvacuationStatusRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ResetShardEvacuationStatusRequest_Body) Reset() { - *x = ResetShardEvacuationStatusRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResetShardEvacuationStatusRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResetShardEvacuationStatusRequest_Body) ProtoMessage() {} - -func (x *ResetShardEvacuationStatusRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResetShardEvacuationStatusRequest_Body.ProtoReflect.Descriptor instead. -func (*ResetShardEvacuationStatusRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{24, 0} -} - -type ResetShardEvacuationStatusResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ResetShardEvacuationStatusResponse_Body) Reset() { - *x = ResetShardEvacuationStatusResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResetShardEvacuationStatusResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResetShardEvacuationStatusResponse_Body) ProtoMessage() {} - -func (x *ResetShardEvacuationStatusResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[71] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResetShardEvacuationStatusResponse_Body.ProtoReflect.Descriptor instead. -func (*ResetShardEvacuationStatusResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{25, 0} -} - -// Request body structure. 
-type StopShardEvacuationRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StopShardEvacuationRequest_Body) Reset() { - *x = StopShardEvacuationRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StopShardEvacuationRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StopShardEvacuationRequest_Body) ProtoMessage() {} - -func (x *StopShardEvacuationRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StopShardEvacuationRequest_Body.ProtoReflect.Descriptor instead. -func (*StopShardEvacuationRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{26, 0} -} - -// Response body structure. -type StopShardEvacuationResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StopShardEvacuationResponse_Body) Reset() { - *x = StopShardEvacuationResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StopShardEvacuationResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StopShardEvacuationResponse_Body) ProtoMessage() {} - -func (x *StopShardEvacuationResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StopShardEvacuationResponse_Body.ProtoReflect.Descriptor instead. -func (*StopShardEvacuationResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{27, 0} -} - -type AddChainLocalOverrideRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Target for which the overrides are applied. - Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` - // Serialized rule chain. If chain ID is left empty - // in the chain, then it will be generated and returned - // in the response. 
- Chain []byte `protobuf:"bytes,2,opt,name=chain,proto3" json:"chain,omitempty"` -} - -func (x *AddChainLocalOverrideRequest_Body) Reset() { - *x = AddChainLocalOverrideRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddChainLocalOverrideRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddChainLocalOverrideRequest_Body) ProtoMessage() {} - -func (x *AddChainLocalOverrideRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddChainLocalOverrideRequest_Body.ProtoReflect.Descriptor instead. -func (*AddChainLocalOverrideRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{28, 0} -} - -func (x *AddChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} - -func (x *AddChainLocalOverrideRequest_Body) GetChain() []byte { - if x != nil { - return x.Chain - } - return nil -} - -type AddChainLocalOverrideResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Chain ID assigned for the added rule chain. - // If chain ID is left empty in the request, then - // it will be generated. - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` -} - -func (x *AddChainLocalOverrideResponse_Body) Reset() { - *x = AddChainLocalOverrideResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddChainLocalOverrideResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddChainLocalOverrideResponse_Body) ProtoMessage() {} - -func (x *AddChainLocalOverrideResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddChainLocalOverrideResponse_Body.ProtoReflect.Descriptor instead. -func (*AddChainLocalOverrideResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{29, 0} -} - -func (x *AddChainLocalOverrideResponse_Body) GetChainId() []byte { - if x != nil { - return x.ChainId - } - return nil -} - -type GetChainLocalOverrideRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Target for which the overrides are applied. - Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` - // Chain ID assigned for the added rule chain. 
- ChainId []byte `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` -} - -func (x *GetChainLocalOverrideRequest_Body) Reset() { - *x = GetChainLocalOverrideRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetChainLocalOverrideRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetChainLocalOverrideRequest_Body) ProtoMessage() {} - -func (x *GetChainLocalOverrideRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[76] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetChainLocalOverrideRequest_Body.ProtoReflect.Descriptor instead. -func (*GetChainLocalOverrideRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{30, 0} -} - -func (x *GetChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} - -func (x *GetChainLocalOverrideRequest_Body) GetChainId() []byte { - if x != nil { - return x.ChainId - } - return nil -} - -type GetChainLocalOverrideResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Serialized rule chain. - Chain []byte `protobuf:"bytes,1,opt,name=chain,proto3" json:"chain,omitempty"` -} - -func (x *GetChainLocalOverrideResponse_Body) Reset() { - *x = GetChainLocalOverrideResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetChainLocalOverrideResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetChainLocalOverrideResponse_Body) ProtoMessage() {} - -func (x *GetChainLocalOverrideResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[77] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetChainLocalOverrideResponse_Body.ProtoReflect.Descriptor instead. -func (*GetChainLocalOverrideResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{31, 0} -} - -func (x *GetChainLocalOverrideResponse_Body) GetChain() []byte { - if x != nil { - return x.Chain - } - return nil -} - -type ListChainLocalOverridesRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Target for which the overrides are applied. 
- Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` -} - -func (x *ListChainLocalOverridesRequest_Body) Reset() { - *x = ListChainLocalOverridesRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[78] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListChainLocalOverridesRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListChainLocalOverridesRequest_Body) ProtoMessage() {} - -func (x *ListChainLocalOverridesRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[78] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListChainLocalOverridesRequest_Body.ProtoReflect.Descriptor instead. -func (*ListChainLocalOverridesRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{32, 0} -} - -func (x *ListChainLocalOverridesRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} - -type ListChainLocalOverridesResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of serialized rule chain. - Chains [][]byte `protobuf:"bytes,1,rep,name=chains,proto3" json:"chains,omitempty"` -} - -func (x *ListChainLocalOverridesResponse_Body) Reset() { - *x = ListChainLocalOverridesResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[79] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListChainLocalOverridesResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListChainLocalOverridesResponse_Body) ProtoMessage() {} - -func (x *ListChainLocalOverridesResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[79] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListChainLocalOverridesResponse_Body.ProtoReflect.Descriptor instead. -func (*ListChainLocalOverridesResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{33, 0} -} - -func (x *ListChainLocalOverridesResponse_Body) GetChains() [][]byte { - if x != nil { - return x.Chains - } - return nil -} - -type ListTargetsLocalOverridesRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Target for which the overrides are applied. 
- ChainName string `protobuf:"bytes,1,opt,name=chainName,proto3" json:"chainName,omitempty"` -} - -func (x *ListTargetsLocalOverridesRequest_Body) Reset() { - *x = ListTargetsLocalOverridesRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[80] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTargetsLocalOverridesRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTargetsLocalOverridesRequest_Body) ProtoMessage() {} - -func (x *ListTargetsLocalOverridesRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[80] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTargetsLocalOverridesRequest_Body.ProtoReflect.Descriptor instead. -func (*ListTargetsLocalOverridesRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{34, 0} -} - -func (x *ListTargetsLocalOverridesRequest_Body) GetChainName() string { - if x != nil { - return x.ChainName - } - return "" -} - -type ListTargetsLocalOverridesResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of chain targets. - Targets []*ChainTarget `protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty"` -} - -func (x *ListTargetsLocalOverridesResponse_Body) Reset() { - *x = ListTargetsLocalOverridesResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[81] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTargetsLocalOverridesResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTargetsLocalOverridesResponse_Body) ProtoMessage() {} - -func (x *ListTargetsLocalOverridesResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[81] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTargetsLocalOverridesResponse_Body.ProtoReflect.Descriptor instead. -func (*ListTargetsLocalOverridesResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{35, 0} -} - -func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []*ChainTarget { - if x != nil { - return x.Targets - } - return nil -} - -type RemoveChainLocalOverrideRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Target for which the overrides are applied. - Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` - // Chain ID assigned for the added rule chain. 
- ChainId []byte `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` -} - -func (x *RemoveChainLocalOverrideRequest_Body) Reset() { - *x = RemoveChainLocalOverrideRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[82] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveChainLocalOverrideRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveChainLocalOverrideRequest_Body) ProtoMessage() {} - -func (x *RemoveChainLocalOverrideRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[82] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveChainLocalOverrideRequest_Body.ProtoReflect.Descriptor instead. -func (*RemoveChainLocalOverrideRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{36, 0} -} - -func (x *RemoveChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} - -func (x *RemoveChainLocalOverrideRequest_Body) GetChainId() []byte { - if x != nil { - return x.ChainId - } - return nil -} - -type RemoveChainLocalOverrideResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RemoveChainLocalOverrideResponse_Body) Reset() { - *x = RemoveChainLocalOverrideResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[83] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveChainLocalOverrideResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveChainLocalOverrideResponse_Body) ProtoMessage() {} - -func (x *RemoveChainLocalOverrideResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[83] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveChainLocalOverrideResponse_Body.ProtoReflect.Descriptor instead. -func (*RemoveChainLocalOverrideResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{37, 0} -} - -type RemoveChainLocalOverridesByTargetRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Target for which the overrides are applied. 
- Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` -} - -func (x *RemoveChainLocalOverridesByTargetRequest_Body) Reset() { - *x = RemoveChainLocalOverridesByTargetRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[84] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveChainLocalOverridesByTargetRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveChainLocalOverridesByTargetRequest_Body) ProtoMessage() {} - -func (x *RemoveChainLocalOverridesByTargetRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[84] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveChainLocalOverridesByTargetRequest_Body.ProtoReflect.Descriptor instead. -func (*RemoveChainLocalOverridesByTargetRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{38, 0} -} - -func (x *RemoveChainLocalOverridesByTargetRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} - -type RemoveChainLocalOverridesByTargetResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RemoveChainLocalOverridesByTargetResponse_Body) Reset() { - *x = RemoveChainLocalOverridesByTargetResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[85] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveChainLocalOverridesByTargetResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveChainLocalOverridesByTargetResponse_Body) ProtoMessage() {} - -func (x *RemoveChainLocalOverridesByTargetResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[85] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveChainLocalOverridesByTargetResponse_Body.ProtoReflect.Descriptor instead. -func (*RemoveChainLocalOverridesByTargetResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{39, 0} -} - -// Request body structure. -type SealWriteCacheRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID of the shard. - Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"` - // Flag indicating whether object read errors should be ignored. 
- IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"` -} - -func (x *SealWriteCacheRequest_Body) Reset() { - *x = SealWriteCacheRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[86] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SealWriteCacheRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SealWriteCacheRequest_Body) ProtoMessage() {} - -func (x *SealWriteCacheRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[86] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SealWriteCacheRequest_Body.ProtoReflect.Descriptor instead. -func (*SealWriteCacheRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{40, 0} -} - -func (x *SealWriteCacheRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} - -func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool { - if x != nil { - return x.IgnoreErrors - } - return false -} - -type SealWriteCacheResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Results []*SealWriteCacheResponse_Body_Status `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` -} - -func (x *SealWriteCacheResponse_Body) Reset() { - *x = SealWriteCacheResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[87] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SealWriteCacheResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SealWriteCacheResponse_Body) ProtoMessage() {} - -func (x *SealWriteCacheResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[87] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SealWriteCacheResponse_Body.ProtoReflect.Descriptor instead. 
-func (*SealWriteCacheResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{41, 0} -} - -func (x *SealWriteCacheResponse_Body) GetResults() []*SealWriteCacheResponse_Body_Status { - if x != nil { - return x.Results - } - return nil -} - -type SealWriteCacheResponse_Body_Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *SealWriteCacheResponse_Body_Status) Reset() { - *x = SealWriteCacheResponse_Body_Status{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[88] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SealWriteCacheResponse_Body_Status) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SealWriteCacheResponse_Body_Status) ProtoMessage() {} - -func (x *SealWriteCacheResponse_Body_Status) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[88] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SealWriteCacheResponse_Body_Status.ProtoReflect.Descriptor instead. -func (*SealWriteCacheResponse_Body_Status) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{41, 0, 0} -} - -func (x *SealWriteCacheResponse_Body_Status) GetShard_ID() []byte { - if x != nil { - return x.Shard_ID - } - return nil -} - -func (x *SealWriteCacheResponse_Body_Status) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *SealWriteCacheResponse_Body_Status) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -type DetachShardsRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"` -} - -func (x *DetachShardsRequest_Body) Reset() { - *x = DetachShardsRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[89] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DetachShardsRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DetachShardsRequest_Body) ProtoMessage() {} - -func (x *DetachShardsRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[89] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DetachShardsRequest_Body.ProtoReflect.Descriptor instead. 
-func (*DetachShardsRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{42, 0} -} - -func (x *DetachShardsRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} - -type DetachShardsResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *DetachShardsResponse_Body) Reset() { - *x = DetachShardsResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_service_proto_msgTypes[90] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DetachShardsResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DetachShardsResponse_Body) ProtoMessage() {} - -func (x *DetachShardsResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_service_proto_msgTypes[90] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DetachShardsResponse_Body.ProtoReflect.Descriptor instead. -func (*DetachShardsResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_control_service_proto_rawDescGZIP(), []int{43, 0} -} - -var File_pkg_services_control_service_proto protoreflect.FileDescriptor - -var file_pkg_services_control_service_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x20, 0x70, - 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x84, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, - 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xfe, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, - 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 
0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, - 0x3a, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6e, - 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x74, 0x4e, - 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, - 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62, - 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x6d, - 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x10, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, - 0x63, 0x65, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, - 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, - 0x6f, 0x64, 0x79, 0x22, 0x8c, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, - 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 
0x65, 0x74, 0x6d, 0x61, 0x70, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, - 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, - 0x64, 0x79, 0x22, 0xd3, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, - 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x4b, 0x0a, 0x04, 0x42, - 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x65, - 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x22, 0xa7, 0x01, 0x0a, 0x12, 0x44, 0x72, 0x6f, - 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x29, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, - 0x21, 0x0a, 0x0c, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, - 0x73, 0x74, 0x22, 0x86, 0x01, 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 
0x53,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x82, 0x01, 0x0a, 0x11,
-	0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
-	0x74, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68,
-	0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
-	0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
-	0x22, 0xb0, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
-	0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
-	0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
-	0x32, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64,
-	0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
-	0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x73, 0x68, 0x61,
-	0x72, 0x64, 0x73, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
-	0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62,
-	0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65,
-	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
-	0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
-	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x1a, 0x77, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08,
-	0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07,
-	0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18,
-	0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12,
-	0x2c, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x65, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75,
-	0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x65, 0x73, 0x65,
-	0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x88, 0x01,
-	0x0a, 0x14, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
-	0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
-	0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
-	0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe0, 0x01, 0x0a, 0x16, 0x53, 0x79, 0x6e,
-	0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75,
-	0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63,
-	0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
-	0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
-	0x5a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61,
-	0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63,
-	0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72,
-	0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65,
-	0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x17,
-	0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
-	0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
-	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a,
-	0x14, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
-	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
-	0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
-	0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
-	0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64,
-	0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64,
-	0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72,
-	0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72,
-	0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x63,
-	0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
-	0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
-	0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69,
-	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c, 0x0a, 0x04,
-	0x42, 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xb1, 0x01, 0x0a, 0x11, 0x46,
-	0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61,
-	0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
-	0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
-	0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
-	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x35, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
-	0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28,
-	0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65,
-	0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x65, 0x61, 0x6c, 0x22, 0x84,
-	0x01, 0x0a, 0x12, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73,
-	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c,
-	0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
-	0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc9, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72,
-	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
-	0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
-	0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x55, 0x0a, 0x04, 0x42, 0x6f,
-	0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63,
-	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
-	0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64,
-	0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
-	0x10, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65,
-	0x73, 0x22, 0x7c, 0x0a, 0x0e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
-	0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74,
-	0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
-	0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
-	0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
-	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22,
-	0x98, 0x02, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
-	0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
-	0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61,
-	0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
-	0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
-	0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x1a, 0x87, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
-	0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
-	0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65,
-	0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e,
-	0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f,
-	0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x22,
-	0x29, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45,
-	0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
-	0x09, 0x0a, 0x05, 0x54, 0x52, 0x45, 0x45, 0x53, 0x10, 0x02, 0x22, 0x98, 0x01, 0x0a, 0x1c, 0x53,
-	0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x04, 0x62,
-	0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
-	0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
-	0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x9e, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61,
-	0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
-	0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x62, 0x6f, 0x64,
-	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
-	0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
-	0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
-	0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
-	0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x89, 0x07, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x53, 0x68,
-	0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
-	0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x62,
-	0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
-	0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
-	0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
-	0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
-	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x1a, 0xee, 0x05, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f,
-	0x74, 0x61, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12,
-	0x2b, 0x0a, 0x11, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a,
-	0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x65, 0x76, 0x61, 0x63,
-	0x75, 0x61, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e,
-	0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65,
-	0x63, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18,
-	0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x4d,
-	0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
-	0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
-	0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x53,
-	0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x53, 0x0a,
-	0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61,
-	0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
-	0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e,
-	0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74,
-	0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
-	0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x55, 0x6e, 0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x73,
-	0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12,
-	0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
-	0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73,
-	0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f,
-	0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73,
-	0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1f, 0x0a,
-	0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01,
-	0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x72, 0x65, 0x65, 0x73, 0x12, 0x27,
-	0x0a, 0x0f, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x72, 0x65, 0x65,
-	0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
-	0x65, 0x64, 0x54, 0x72, 0x65, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x65,
-	0x64, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66,
-	0x61, 0x69, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x65, 0x65, 0x73, 0x1a, 0x25, 0x0a, 0x0d, 0x55, 0x6e,
-	0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x1a, 0x24, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
-	0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
-	0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x49, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75,
-	0x73, 0x12, 0x23, 0x0a, 0x1f, 0x45, 0x56, 0x41, 0x43, 0x55, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x48,
-	0x41, 0x52, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46,
-	0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e,
-	0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44,
-	0x10, 0x02, 0x22, 0xa2, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
-	0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
-	0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
-	0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
-	0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
-	0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x22, 0x52, 0x65, 0x73, 0x65,
-	0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44,
-	0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63,
-	0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
-	0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
-	0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
-	0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
-	0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
-	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x94,
-	0x01, 0x0a, 0x1a, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
-	0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a,
-	0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
-	0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
-	0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x96, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68,
-	0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
-	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74,
-	0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
-	0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
-	0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
-	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xdc,
-	0x01, 0x0a, 0x1c, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
-	0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
-	0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
-	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71,
-	0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
-	0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
-	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x1a, 0x4a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72,
-	0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
-	0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x22, 0xb5, 0x01,
-	0x0a, 0x1d, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
-	0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
-	0x3f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
-	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73,
-	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
-	0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
-	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68,
-	0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68,
-	0x61, 0x69, 0x6e, 0x49, 0x64, 0x22, 0xe1, 0x01, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61,
-	0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52,
-	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47,
-	0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
-	0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
-	0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x4f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
-	0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e,
-	0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x19,
-	0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
-	0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x22, 0xb0, 0x01, 0x0a, 0x1d, 0x47, 0x65,
-	0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
-	0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x62,
-	0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
-	0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
-	0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c,
-	0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x22, 0xca, 0x01, 0x0a,
-	0x1e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
-	0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
-	0x40, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69,
-	0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52,
-	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
-	0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x1a, 0x34, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74,
-	0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65,
-	0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x1f, 0x4c, 0x69,
-	0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
-	0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a,
-	0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
-	0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73,
-	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
-	0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
-	0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x1a, 0x1e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x68,
-	0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x68, 0x61, 0x69,
-	0x6e, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65,
-	0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
-	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
-	0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x24, 0x0a,
-	0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4e, 0x61,
-	0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4e,
-	0x61, 0x6d, 0x65, 0x22, 0xd2, 0x01, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67,
-	0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
-	0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x62, 0x6f, 0x64,
-	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
-	0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63,
-	0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
-	0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
-	0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x1a, 0x36, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2e, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67,
-	0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
-	0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x1f, 0x52, 0x65, 0x6d,
-	0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
-	0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04,
-	0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x6e,
-	0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
-	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71,
-	0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
-	0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
-	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x1a, 0x4f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72,
-	0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
-	0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e,
-	0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e,
-	0x49, 0x64, 0x22, 0xa0, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61,
-	0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
-	0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
-	0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x28, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
-	0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
-	0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x12, 0x4a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76,
-	0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
-	0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
-	0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
-	0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x1a, 0x34, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67,
-	0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
-	0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06,
-	0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x29, 0x52, 0x65, 0x6d, 0x6f, 0x76,
-	0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
-	0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70,
-	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d,
-	0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
-	0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
-	0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xca, 0x01, 0x0a, 0x15,
-	0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
-	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
-	0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75,
-	0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
-	0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72,
-	0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72,
-	0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72,
-	0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f,
-	0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0xa9, 0x02, 0x0a, 0x16, 0x53, 0x65, 0x61,
-	0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
-	0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c,
-	0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
-	0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
-	0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
-	0xa2, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75,
-	0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63,
-	0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e,
-	0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
-	0x53, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
-	0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
-	0x72, 0x64, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18,
-	0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x14,
-	0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
-	0x72, 0x72, 0x6f, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53,
-	0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04,
-	0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e,
-	0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64,
-	0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
-	0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
-	0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a,
-	0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52,
-	0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68,
-	0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
-	0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
-	0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
-	0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
-	0x6f, 0x64, 0x79, 0x32, 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53,
-	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
-	0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61,
-	0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61,
-	0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
-	0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71,
-	0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
-	0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74,
-	0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61,
-	0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e,
-	0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74,
-	0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b,
-	0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
-	0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
-	0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68,
-	0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c,
-	0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53,
-	0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a,
-	0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
-	0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f,
-	0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79,
-	0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e,
-	0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
-	0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
-	0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63,
-	0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75,
-	0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x12, 0x63, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
-	0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
-	0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
-	0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
-	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68,
-	0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
-	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
-	0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
-	0x73, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53,
-	0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
-	0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
-	0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53,
-	0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
-	0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52,
-	0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74,
-	0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
-	0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a,
-	0x13, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
-	0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
-	0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
-	0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1a, 0x2e,
-	0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63,
-	0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72,
-	0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f,
-	0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
-	0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x12, 0x66, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63,
-	0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e,
-	0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63,
-	0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
-	0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43,
-	0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64,
-	0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x47, 0x65, 0x74,
-	0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
-	0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74,
-	0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
-	0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
-	0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x12, 0x6c, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
-	0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x27, 0x2e, 0x63,
-	0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e,
-	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65,
-	0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76,
-	0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
-	0x6f, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
-	0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x2e, 0x63, 0x6f,
-	0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69,
-	0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65,
-	0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
-	0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x12, 0x8a, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
-	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79,
-	0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
-	0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
-	0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67,
-	0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
-	0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
-	0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54,
-	0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a,
-	0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61,
-	0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e,
-	0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73,
-	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65,
-	0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
-	0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
-	0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61,
-	0x63, 0x68, 0x65, 0x12, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
-	0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75,
-	0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
-	0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70,
-	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68,
-	0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
-	0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73,
-	0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c,
-	0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
-	0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e,
-	0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
-	file_pkg_services_control_service_proto_rawDescOnce sync.Once
-	file_pkg_services_control_service_proto_rawDescData = file_pkg_services_control_service_proto_rawDesc
-)
-
-func file_pkg_services_control_service_proto_rawDescGZIP() []byte {
-	file_pkg_services_control_service_proto_rawDescOnce.Do(func() {
-		file_pkg_services_control_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_service_proto_rawDescData)
-	})
-	return file_pkg_services_control_service_proto_rawDescData
-}
-
-var file_pkg_services_control_service_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 91)
-var file_pkg_services_control_service_proto_goTypes = []interface{}{
-	(StartShardEvacuationRequest_Body_Scope)(0), // 0: control.StartShardEvacuationRequest.Body.Scope
-	(GetShardEvacuationStatusResponse_Body_Status)(0), // 1: control.GetShardEvacuationStatusResponse.Body.Status
-	(*HealthCheckRequest)(nil), // 2: control.HealthCheckRequest
-	(*HealthCheckResponse)(nil), // 3: control.HealthCheckResponse
-	(*SetNetmapStatusRequest)(nil), // 4: control.SetNetmapStatusRequest
-	(*SetNetmapStatusResponse)(nil), // 5: control.SetNetmapStatusResponse
-	(*GetNetmapStatusRequest)(nil), // 6: control.GetNetmapStatusRequest
-	(*GetNetmapStatusResponse)(nil), // 7: control.GetNetmapStatusResponse
-	(*DropObjectsRequest)(nil), // 8: control.DropObjectsRequest
-	(*DropObjectsResponse)(nil), // 9: control.DropObjectsResponse
-	(*ListShardsRequest)(nil), // 10: control.ListShardsRequest
-	(*ListShardsResponse)(nil), // 11: control.ListShardsResponse
-	(*SetShardModeRequest)(nil), // 12: control.SetShardModeRequest
-	(*SetShardModeResponse)(nil), // 13: control.SetShardModeResponse
-	(*SynchronizeTreeRequest)(nil), // 14: control.SynchronizeTreeRequest
-	(*SynchronizeTreeResponse)(nil), // 15: control.SynchronizeTreeResponse
-	(*EvacuateShardRequest)(nil), // 16: control.EvacuateShardRequest
-	(*EvacuateShardResponse)(nil), // 17: control.EvacuateShardResponse
-	(*FlushCacheRequest)(nil), // 18: control.FlushCacheRequest
-	(*FlushCacheResponse)(nil), // 19: control.FlushCacheResponse
-	(*DoctorRequest)(nil), // 20: control.DoctorRequest
-	(*DoctorResponse)(nil), // 21: control.DoctorResponse
-	(*StartShardEvacuationRequest)(nil), // 22: control.StartShardEvacuationRequest
-	(*StartShardEvacuationResponse)(nil), // 23: control.StartShardEvacuationResponse
-	(*GetShardEvacuationStatusRequest)(nil), // 24: control.GetShardEvacuationStatusRequest
-	(*GetShardEvacuationStatusResponse)(nil), // 25: control.GetShardEvacuationStatusResponse
-	(*ResetShardEvacuationStatusRequest)(nil), // 26: control.ResetShardEvacuationStatusRequest
-	(*ResetShardEvacuationStatusResponse)(nil), // 27: control.ResetShardEvacuationStatusResponse
-	(*StopShardEvacuationRequest)(nil), // 28: control.StopShardEvacuationRequest
-	(*StopShardEvacuationResponse)(nil), // 29: control.StopShardEvacuationResponse
-	(*AddChainLocalOverrideRequest)(nil), // 30: control.AddChainLocalOverrideRequest
-	(*AddChainLocalOverrideResponse)(nil), // 31: control.AddChainLocalOverrideResponse
-	(*GetChainLocalOverrideRequest)(nil), // 32: control.GetChainLocalOverrideRequest
-	(*GetChainLocalOverrideResponse)(nil), // 33: control.GetChainLocalOverrideResponse
-	(*ListChainLocalOverridesRequest)(nil), // 34: control.ListChainLocalOverridesRequest
-	(*ListChainLocalOverridesResponse)(nil), // 35: control.ListChainLocalOverridesResponse
-	(*ListTargetsLocalOverridesRequest)(nil), // 36: control.ListTargetsLocalOverridesRequest
-	(*ListTargetsLocalOverridesResponse)(nil), // 37: control.ListTargetsLocalOverridesResponse
-	(*RemoveChainLocalOverrideRequest)(nil), // 38: control.RemoveChainLocalOverrideRequest
-	(*RemoveChainLocalOverrideResponse)(nil), // 39: control.RemoveChainLocalOverrideResponse
-	(*RemoveChainLocalOverridesByTargetRequest)(nil), // 40: control.RemoveChainLocalOverridesByTargetRequest
-	(*RemoveChainLocalOverridesByTargetResponse)(nil), // 41: control.RemoveChainLocalOverridesByTargetResponse
-	(*SealWriteCacheRequest)(nil), // 42: control.SealWriteCacheRequest
-	(*SealWriteCacheResponse)(nil), // 43: control.SealWriteCacheResponse
-	(*DetachShardsRequest)(nil), // 44: control.DetachShardsRequest
-	(*DetachShardsResponse)(nil), // 45: control.DetachShardsResponse
-	(*HealthCheckRequest_Body)(nil), // 46: control.HealthCheckRequest.Body
-	(*HealthCheckResponse_Body)(nil), // 47: control.HealthCheckResponse.Body
-	(*SetNetmapStatusRequest_Body)(nil), // 48: control.SetNetmapStatusRequest.Body
-	(*SetNetmapStatusResponse_Body)(nil), // 49: control.SetNetmapStatusResponse.Body
-	(*GetNetmapStatusRequest_Body)(nil), // 50: control.GetNetmapStatusRequest.Body
-	(*GetNetmapStatusResponse_Body)(nil), // 51: control.GetNetmapStatusResponse.Body
-	(*DropObjectsRequest_Body)(nil), // 52: control.DropObjectsRequest.Body
-	(*DropObjectsResponse_Body)(nil), // 53: control.DropObjectsResponse.Body
-	(*ListShardsRequest_Body)(nil), // 54: control.ListShardsRequest.Body
-	(*ListShardsResponse_Body)(nil), // 55: control.ListShardsResponse.Body
-	(*SetShardModeRequest_Body)(nil), // 56: control.SetShardModeRequest.Body
-	(*SetShardModeResponse_Body)(nil), // 57: control.SetShardModeResponse.Body
-	(*SynchronizeTreeRequest_Body)(nil), // 58: control.SynchronizeTreeRequest.Body
-	(*SynchronizeTreeResponse_Body)(nil), // 59: control.SynchronizeTreeResponse.Body
-	(*EvacuateShardRequest_Body)(nil), // 60: control.EvacuateShardRequest.Body
-	(*EvacuateShardResponse_Body)(nil), // 61: control.EvacuateShardResponse.Body
-	(*FlushCacheRequest_Body)(nil), // 62: control.FlushCacheRequest.Body
-	(*FlushCacheResponse_Body)(nil), // 63: control.FlushCacheResponse.Body
-	(*DoctorRequest_Body)(nil), // 64: control.DoctorRequest.Body
-	(*DoctorResponse_Body)(nil), // 65: control.DoctorResponse.Body
-	(*StartShardEvacuationRequest_Body)(nil), // 66: control.StartShardEvacuationRequest.Body
-	(*StartShardEvacuationResponse_Body)(nil), // 67: control.StartShardEvacuationResponse.Body
-	(*GetShardEvacuationStatusRequest_Body)(nil), // 68: control.GetShardEvacuationStatusRequest.Body
-	(*GetShardEvacuationStatusResponse_Body)(nil), // 69: control.GetShardEvacuationStatusResponse.Body
-	(*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil), // 70: control.GetShardEvacuationStatusResponse.Body.UnixTimestamp
-	(*GetShardEvacuationStatusResponse_Body_Duration)(nil), // 71: control.GetShardEvacuationStatusResponse.Body.Duration
-	(*ResetShardEvacuationStatusRequest_Body)(nil), // 72: control.ResetShardEvacuationStatusRequest.Body
-	(*ResetShardEvacuationStatusResponse_Body)(nil), // 73: control.ResetShardEvacuationStatusResponse.Body
-	(*StopShardEvacuationRequest_Body)(nil), // 74: control.StopShardEvacuationRequest.Body
-	(*StopShardEvacuationResponse_Body)(nil), // 75: control.StopShardEvacuationResponse.Body
-	(*AddChainLocalOverrideRequest_Body)(nil), // 76: control.AddChainLocalOverrideRequest.Body
-	(*AddChainLocalOverrideResponse_Body)(nil), // 77: control.AddChainLocalOverrideResponse.Body
-	(*GetChainLocalOverrideRequest_Body)(nil), // 78: control.GetChainLocalOverrideRequest.Body
-	(*GetChainLocalOverrideResponse_Body)(nil), // 79: control.GetChainLocalOverrideResponse.Body
-	(*ListChainLocalOverridesRequest_Body)(nil), // 80: control.ListChainLocalOverridesRequest.Body
-	(*ListChainLocalOverridesResponse_Body)(nil), // 81: control.ListChainLocalOverridesResponse.Body
-	(*ListTargetsLocalOverridesRequest_Body)(nil), // 82: control.ListTargetsLocalOverridesRequest.Body
-	(*ListTargetsLocalOverridesResponse_Body)(nil), // 83: control.ListTargetsLocalOverridesResponse.Body
-	(*RemoveChainLocalOverrideRequest_Body)(nil), // 84: control.RemoveChainLocalOverrideRequest.Body
-	(*RemoveChainLocalOverrideResponse_Body)(nil), // 85: control.RemoveChainLocalOverrideResponse.Body
-	(*RemoveChainLocalOverridesByTargetRequest_Body)(nil), // 86: control.RemoveChainLocalOverridesByTargetRequest.Body
-	(*RemoveChainLocalOverridesByTargetResponse_Body)(nil), // 87: control.RemoveChainLocalOverridesByTargetResponse.Body
-	(*SealWriteCacheRequest_Body)(nil), // 88: control.SealWriteCacheRequest.Body
-	(*SealWriteCacheResponse_Body)(nil), // 89: control.SealWriteCacheResponse.Body
-	(*SealWriteCacheResponse_Body_Status)(nil), // 90: control.SealWriteCacheResponse.Body.Status
-	(*DetachShardsRequest_Body)(nil), // 91: control.DetachShardsRequest.Body
-	(*DetachShardsResponse_Body)(nil), // 92: control.DetachShardsResponse.Body
-	(*Signature)(nil), // 93: control.Signature
-	(NetmapStatus)(0), // 94: control.NetmapStatus
-	(HealthStatus)(0), // 95: control.HealthStatus
-	(*ShardInfo)(nil), // 96: control.ShardInfo
-	(ShardMode)(0), // 97: control.ShardMode
-	(*ChainTarget)(nil), // 98: control.ChainTarget
-}
-var file_pkg_services_control_service_proto_depIdxs = []int32{
-	46, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
-	93, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
-	47, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
-	93, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
-	48, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
-	93, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
-	49, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
-	93, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
-	50, // 8: control.GetNetmapStatusRequest.body:type_name -> control.GetNetmapStatusRequest.Body
-	93, // 9: control.GetNetmapStatusRequest.signature:type_name -> control.Signature
-	51, // 10: control.GetNetmapStatusResponse.body:type_name -> control.GetNetmapStatusResponse.Body
-	93, // 11: control.GetNetmapStatusResponse.signature:type_name -> control.Signature
-	52, // 12: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
-	93, // 13: control.DropObjectsRequest.signature:type_name -> control.Signature
-	53, // 14: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
-	93, // 15: control.DropObjectsResponse.signature:type_name -> control.Signature
-	54, // 16: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
-	93, // 17: control.ListShardsRequest.signature:type_name -> control.Signature
-	55, // 18: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
-	93, // 19: control.ListShardsResponse.signature:type_name -> control.Signature
-	56, // 20: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
-	93, // 21: control.SetShardModeRequest.signature:type_name -> control.Signature
-	57, // 22: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
-	93, // 23: control.SetShardModeResponse.signature:type_name -> control.Signature
-	58, // 24: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
-	93, // 25: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
-	59, // 26: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
-	93, // 27: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
-	60, // 28: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
-	93, // 29: control.EvacuateShardRequest.signature:type_name -> control.Signature
-	61, // 30: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
-	93, // 31: control.EvacuateShardResponse.signature:type_name -> control.Signature
-	62, // 32: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
-	93, // 33: control.FlushCacheRequest.signature:type_name -> control.Signature
-	63, // 34: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
-	93, // 35: control.FlushCacheResponse.signature:type_name -> control.Signature
-	64, // 36: control.DoctorRequest.body:type_name ->
control.DoctorRequest.Body - 93, // 37: control.DoctorRequest.signature:type_name -> control.Signature - 65, // 38: control.DoctorResponse.body:type_name -> control.DoctorResponse.Body - 93, // 39: control.DoctorResponse.signature:type_name -> control.Signature - 66, // 40: control.StartShardEvacuationRequest.body:type_name -> control.StartShardEvacuationRequest.Body - 93, // 41: control.StartShardEvacuationRequest.signature:type_name -> control.Signature - 67, // 42: control.StartShardEvacuationResponse.body:type_name -> control.StartShardEvacuationResponse.Body - 93, // 43: control.StartShardEvacuationResponse.signature:type_name -> control.Signature - 68, // 44: control.GetShardEvacuationStatusRequest.body:type_name -> control.GetShardEvacuationStatusRequest.Body - 93, // 45: control.GetShardEvacuationStatusRequest.signature:type_name -> control.Signature - 69, // 46: control.GetShardEvacuationStatusResponse.body:type_name -> control.GetShardEvacuationStatusResponse.Body - 93, // 47: control.GetShardEvacuationStatusResponse.signature:type_name -> control.Signature - 72, // 48: control.ResetShardEvacuationStatusRequest.body:type_name -> control.ResetShardEvacuationStatusRequest.Body - 93, // 49: control.ResetShardEvacuationStatusRequest.signature:type_name -> control.Signature - 73, // 50: control.ResetShardEvacuationStatusResponse.body:type_name -> control.ResetShardEvacuationStatusResponse.Body - 93, // 51: control.ResetShardEvacuationStatusResponse.signature:type_name -> control.Signature - 74, // 52: control.StopShardEvacuationRequest.body:type_name -> control.StopShardEvacuationRequest.Body - 93, // 53: control.StopShardEvacuationRequest.signature:type_name -> control.Signature - 75, // 54: control.StopShardEvacuationResponse.body:type_name -> control.StopShardEvacuationResponse.Body - 93, // 55: control.StopShardEvacuationResponse.signature:type_name -> control.Signature - 76, // 56: control.AddChainLocalOverrideRequest.body:type_name -> control.AddChainLocalOverrideRequest.Body - 93, // 57: control.AddChainLocalOverrideRequest.signature:type_name -> control.Signature - 77, // 58: control.AddChainLocalOverrideResponse.body:type_name -> control.AddChainLocalOverrideResponse.Body - 93, // 59: control.AddChainLocalOverrideResponse.signature:type_name -> control.Signature - 78, // 60: control.GetChainLocalOverrideRequest.body:type_name -> control.GetChainLocalOverrideRequest.Body - 93, // 61: control.GetChainLocalOverrideRequest.signature:type_name -> control.Signature - 79, // 62: control.GetChainLocalOverrideResponse.body:type_name -> control.GetChainLocalOverrideResponse.Body - 93, // 63: control.GetChainLocalOverrideResponse.signature:type_name -> control.Signature - 80, // 64: control.ListChainLocalOverridesRequest.body:type_name -> control.ListChainLocalOverridesRequest.Body - 93, // 65: control.ListChainLocalOverridesRequest.signature:type_name -> control.Signature - 81, // 66: control.ListChainLocalOverridesResponse.body:type_name -> control.ListChainLocalOverridesResponse.Body - 93, // 67: control.ListChainLocalOverridesResponse.signature:type_name -> control.Signature - 82, // 68: control.ListTargetsLocalOverridesRequest.body:type_name -> control.ListTargetsLocalOverridesRequest.Body - 93, // 69: control.ListTargetsLocalOverridesRequest.signature:type_name -> control.Signature - 83, // 70: control.ListTargetsLocalOverridesResponse.body:type_name -> control.ListTargetsLocalOverridesResponse.Body - 93, // 71: control.ListTargetsLocalOverridesResponse.signature:type_name -> 
control.Signature - 84, // 72: control.RemoveChainLocalOverrideRequest.body:type_name -> control.RemoveChainLocalOverrideRequest.Body - 93, // 73: control.RemoveChainLocalOverrideRequest.signature:type_name -> control.Signature - 85, // 74: control.RemoveChainLocalOverrideResponse.body:type_name -> control.RemoveChainLocalOverrideResponse.Body - 93, // 75: control.RemoveChainLocalOverrideResponse.signature:type_name -> control.Signature - 86, // 76: control.RemoveChainLocalOverridesByTargetRequest.body:type_name -> control.RemoveChainLocalOverridesByTargetRequest.Body - 93, // 77: control.RemoveChainLocalOverridesByTargetRequest.signature:type_name -> control.Signature - 87, // 78: control.RemoveChainLocalOverridesByTargetResponse.body:type_name -> control.RemoveChainLocalOverridesByTargetResponse.Body - 93, // 79: control.RemoveChainLocalOverridesByTargetResponse.signature:type_name -> control.Signature - 88, // 80: control.SealWriteCacheRequest.body:type_name -> control.SealWriteCacheRequest.Body - 93, // 81: control.SealWriteCacheRequest.signature:type_name -> control.Signature - 89, // 82: control.SealWriteCacheResponse.body:type_name -> control.SealWriteCacheResponse.Body - 93, // 83: control.SealWriteCacheResponse.signature:type_name -> control.Signature - 91, // 84: control.DetachShardsRequest.body:type_name -> control.DetachShardsRequest.Body - 93, // 85: control.DetachShardsRequest.signature:type_name -> control.Signature - 92, // 86: control.DetachShardsResponse.body:type_name -> control.DetachShardsResponse.Body - 93, // 87: control.DetachShardsResponse.signature:type_name -> control.Signature - 94, // 88: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus - 95, // 89: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus - 94, // 90: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus - 94, // 91: control.GetNetmapStatusResponse.Body.status:type_name -> control.NetmapStatus - 96, // 92: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo - 97, // 93: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode - 1, // 94: control.GetShardEvacuationStatusResponse.Body.status:type_name -> control.GetShardEvacuationStatusResponse.Body.Status - 71, // 95: control.GetShardEvacuationStatusResponse.Body.duration:type_name -> control.GetShardEvacuationStatusResponse.Body.Duration - 70, // 96: control.GetShardEvacuationStatusResponse.Body.started_at:type_name -> control.GetShardEvacuationStatusResponse.Body.UnixTimestamp - 98, // 97: control.AddChainLocalOverrideRequest.Body.target:type_name -> control.ChainTarget - 98, // 98: control.GetChainLocalOverrideRequest.Body.target:type_name -> control.ChainTarget - 98, // 99: control.ListChainLocalOverridesRequest.Body.target:type_name -> control.ChainTarget - 98, // 100: control.ListTargetsLocalOverridesResponse.Body.targets:type_name -> control.ChainTarget - 98, // 101: control.RemoveChainLocalOverrideRequest.Body.target:type_name -> control.ChainTarget - 98, // 102: control.RemoveChainLocalOverridesByTargetRequest.Body.target:type_name -> control.ChainTarget - 90, // 103: control.SealWriteCacheResponse.Body.results:type_name -> control.SealWriteCacheResponse.Body.Status - 2, // 104: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest - 4, // 105: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest - 6, // 106: control.ControlService.GetNetmapStatus:input_type -> 
control.GetNetmapStatusRequest - 8, // 107: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest - 10, // 108: control.ControlService.ListShards:input_type -> control.ListShardsRequest - 12, // 109: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest - 14, // 110: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest - 16, // 111: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest - 22, // 112: control.ControlService.StartShardEvacuation:input_type -> control.StartShardEvacuationRequest - 24, // 113: control.ControlService.GetShardEvacuationStatus:input_type -> control.GetShardEvacuationStatusRequest - 26, // 114: control.ControlService.ResetShardEvacuationStatus:input_type -> control.ResetShardEvacuationStatusRequest - 28, // 115: control.ControlService.StopShardEvacuation:input_type -> control.StopShardEvacuationRequest - 18, // 116: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest - 20, // 117: control.ControlService.Doctor:input_type -> control.DoctorRequest - 30, // 118: control.ControlService.AddChainLocalOverride:input_type -> control.AddChainLocalOverrideRequest - 32, // 119: control.ControlService.GetChainLocalOverride:input_type -> control.GetChainLocalOverrideRequest - 34, // 120: control.ControlService.ListChainLocalOverrides:input_type -> control.ListChainLocalOverridesRequest - 38, // 121: control.ControlService.RemoveChainLocalOverride:input_type -> control.RemoveChainLocalOverrideRequest - 40, // 122: control.ControlService.RemoveChainLocalOverridesByTarget:input_type -> control.RemoveChainLocalOverridesByTargetRequest - 36, // 123: control.ControlService.ListTargetsLocalOverrides:input_type -> control.ListTargetsLocalOverridesRequest - 42, // 124: control.ControlService.SealWriteCache:input_type -> control.SealWriteCacheRequest - 44, // 125: control.ControlService.DetachShards:input_type -> control.DetachShardsRequest - 3, // 126: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse - 5, // 127: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse - 7, // 128: control.ControlService.GetNetmapStatus:output_type -> control.GetNetmapStatusResponse - 9, // 129: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse - 11, // 130: control.ControlService.ListShards:output_type -> control.ListShardsResponse - 13, // 131: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse - 15, // 132: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse - 17, // 133: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse - 23, // 134: control.ControlService.StartShardEvacuation:output_type -> control.StartShardEvacuationResponse - 25, // 135: control.ControlService.GetShardEvacuationStatus:output_type -> control.GetShardEvacuationStatusResponse - 27, // 136: control.ControlService.ResetShardEvacuationStatus:output_type -> control.ResetShardEvacuationStatusResponse - 29, // 137: control.ControlService.StopShardEvacuation:output_type -> control.StopShardEvacuationResponse - 19, // 138: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse - 21, // 139: control.ControlService.Doctor:output_type -> control.DoctorResponse - 31, // 140: control.ControlService.AddChainLocalOverride:output_type -> control.AddChainLocalOverrideResponse - 33, // 141: 
control.ControlService.GetChainLocalOverride:output_type -> control.GetChainLocalOverrideResponse - 35, // 142: control.ControlService.ListChainLocalOverrides:output_type -> control.ListChainLocalOverridesResponse - 39, // 143: control.ControlService.RemoveChainLocalOverride:output_type -> control.RemoveChainLocalOverrideResponse - 41, // 144: control.ControlService.RemoveChainLocalOverridesByTarget:output_type -> control.RemoveChainLocalOverridesByTargetResponse - 37, // 145: control.ControlService.ListTargetsLocalOverrides:output_type -> control.ListTargetsLocalOverridesResponse - 43, // 146: control.ControlService.SealWriteCache:output_type -> control.SealWriteCacheResponse - 45, // 147: control.ControlService.DetachShards:output_type -> control.DetachShardsResponse - 126, // [126:148] is the sub-list for method output_type - 104, // [104:126] is the sub-list for method input_type - 104, // [104:104] is the sub-list for extension type_name - 104, // [104:104] is the sub-list for extension extendee - 0, // [0:104] is the sub-list for field type_name -} - -func init() { file_pkg_services_control_service_proto_init() } -func file_pkg_services_control_service_proto_init() { - if File_pkg_services_control_service_proto != nil { - return - } - file_pkg_services_control_types_proto_init() - if !protoimpl.UnsafeEnabled { - file_pkg_services_control_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNetmapStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNetmapStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNetmapStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNetmapStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropObjectsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropObjectsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_pkg_services_control_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListShardsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListShardsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardModeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardModeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SynchronizeTreeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SynchronizeTreeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvacuateShardRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvacuateShardResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlushCacheRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlushCacheResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DoctorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DoctorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartShardEvacuationRequest); i { - case 0: - return &v.state - case 
1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartShardEvacuationResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardEvacuationStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardEvacuationStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetShardEvacuationStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetShardEvacuationStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopShardEvacuationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopShardEvacuationResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddChainLocalOverrideRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddChainLocalOverrideResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetChainLocalOverrideRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetChainLocalOverrideResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListChainLocalOverridesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_pkg_services_control_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListChainLocalOverridesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTargetsLocalOverridesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTargetsLocalOverridesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverrideRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverrideResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverridesByTargetRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverridesByTargetResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SealWriteCacheRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SealWriteCacheResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DetachShardsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DetachShardsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[45].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNetmapStatusRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetNetmapStatusResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNetmapStatusRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNetmapStatusResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropObjectsRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropObjectsResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListShardsRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListShardsResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardModeRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardModeResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SynchronizeTreeRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SynchronizeTreeResponse_Body); i { - case 0: - return &v.state - case 1: 
- return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvacuateShardRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvacuateShardResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlushCacheRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlushCacheResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DoctorRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DoctorResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartShardEvacuationRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartShardEvacuationResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardEvacuationStatusRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardEvacuationStatusResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardEvacuationStatusResponse_Body_UnixTimestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardEvacuationStatusResponse_Body_Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetShardEvacuationStatusRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetShardEvacuationStatusResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopShardEvacuationRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopShardEvacuationResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddChainLocalOverrideRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddChainLocalOverrideResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetChainLocalOverrideRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetChainLocalOverrideResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListChainLocalOverridesRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListChainLocalOverridesResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTargetsLocalOverridesRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTargetsLocalOverridesResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_pkg_services_control_service_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverrideRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverrideResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverridesByTargetRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveChainLocalOverridesByTargetResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SealWriteCacheRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SealWriteCacheResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SealWriteCacheResponse_Body_Status); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DetachShardsRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_service_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DetachShardsResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_services_control_service_proto_rawDesc, - NumEnums: 2, - NumMessages: 91, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_pkg_services_control_service_proto_goTypes, - DependencyIndexes: file_pkg_services_control_service_proto_depIdxs, - EnumInfos: file_pkg_services_control_service_proto_enumTypes, - MessageInfos: file_pkg_services_control_service_proto_msgTypes, - }.Build() - File_pkg_services_control_service_proto = out.File - file_pkg_services_control_service_proto_rawDesc = nil - file_pkg_services_control_service_proto_goTypes = nil - file_pkg_services_control_service_proto_depIdxs = nil -} diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index 2cd8434fc..4c539acfc 100644 --- 
a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -30,11 +30,6 @@ service ControlService { // Synchronizes all log operations for the specified tree. rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse); - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse); - // StartShardEvacuation starts moving all data from one shard to the others. rpc StartShardEvacuation(StartShardEvacuationRequest) returns (StartShardEvacuationResponse); @@ -91,6 +86,12 @@ service ControlService { // DetachShards detaches and closes shards. rpc DetachShards(DetachShardsRequest) returns (DetachShardsResponse); + + // StartShardRebuild starts the shard rebuild process. + rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse); + + // ListShardsForObject returns information about the shards where the object is stored. + rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse); } // Health check request. @@ -391,6 +392,12 @@ message StartShardEvacuationRequest { bool ignore_errors = 2; // Evacuation scope. uint32 scope = 3; + // Count of concurrent container evacuation workers. + uint32 container_worker_count = 4; + // Count of concurrent object evacuation workers. + uint32 object_worker_count = 5; + // Evacuate only objects stored in `REP 1` containers. + bool rep_one_only = 6; } Body body = 1; @@ -655,6 +662,15 @@ message SealWriteCacheRequest { // Flag indicating whether object read errors should be ignored. bool ignore_errors = 2; + + // Flag indicating whether the writecache should be sealed asynchronously. + bool async = 3; + + // If true, the writecache will be sealed, but its mode will be restored to the current one. + bool restore_mode = 4; + + // If true, the writecache will shrink its internal storage. + bool shrink = 5; } Body body = 1; @@ -690,3 +706,49 @@ message DetachShardsResponse { Signature signature = 2; } + +message StartShardRebuildRequest { + message Body { + repeated bytes shard_ID = 1; + uint32 target_fill_percent = 2; + uint32 concurrency_limit = 3; + } + + Body body = 1; + Signature signature = 2; +} + +message StartShardRebuildResponse { + message Body { + message Status { + bytes shard_ID = 1; + bool success = 2; + string error = 3; + } + repeated Status results = 1; + } + + Body body = 1; + + Signature signature = 2; +} + +message ListShardsForObjectRequest { + message Body { + string object_id = 1; + string container_id = 2; + } + + Body body = 1; + Signature signature = 2; +} + +message ListShardsForObjectResponse { + message Body { + // List of the node's shards storing the object. + repeated bytes shard_ID = 1; + } + + Body body = 1; + Signature signature = 2; +}
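The new messages keep the body-plus-signature shape used throughout the control service. As a minimal client-side sketch (not part of this change set), the snippet below shows how a StartShardRebuildRequest might be assembled and serialized; the Go field and accessor names (Shard_ID, TargetFillPercent, ConcurrencyLimit, SetBody, MarshalProtobuf) are assumptions extrapolated from the generator conventions visible in the HealthCheckRequest code further down.

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	// Body fields mirror the proto definition above; the Go names are assumed
	// to follow the generator's conventions (shard_ID -> Shard_ID, etc.).
	body := &control.StartShardRebuildRequest_Body{
		Shard_ID:          [][]byte{[]byte("shard-id-bytes")}, // repeated bytes shard_ID = 1
		TargetFillPercent: 80,                                 // uint32 target_fill_percent = 2
		ConcurrencyLimit:  4,                                  // uint32 concurrency_limit = 3
	}

	req := new(control.StartShardRebuildRequest)
	req.SetBody(body)
	// In real use the request is signed (req.SetSignature) before sending.

	buf := req.MarshalProtobuf(nil) // stable binary encoding, appended to nil
	fmt.Printf("request is %d bytes\n", len(buf))
}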
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index a287606fa..44849d591 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -2,7 +2,27 @@ package control -import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" +import ( + json "encoding/json" + fmt "fmt" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" + easyproto "github.com/VictoriaMetrics/easyproto" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" + strconv "strconv" +) + +type HealthCheckRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil) + _ json.Marshaler = (*HealthCheckRequest_Body)(nil) + _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil) +) // StableSize returns the size of x in protobuf format. // @@ -14,18 +34,93 @@ func (x *HealthCheckRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +}
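The regenerated file drops the old StableMarshal API in favour of MarshalProtobuf/UnmarshalProtobuf built on github.com/VictoriaMetrics/easyproto (with pooled marshalers) plus easyjson-based JSON codecs. A rough round-trip sketch using only methods introduced in this file; the control import path and the NetmapStatus_ONLINE identifier are assumptions inferred from the enum's proto definition.

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.HealthCheckResponse_Body)
	body.SetNetmapStatus(control.NetmapStatus_ONLINE) // enum identifier assumed

	resp := new(control.HealthCheckResponse)
	resp.SetBody(body)

	// Binary round trip: MarshalProtobuf appends to dst (nil allocates a new buffer).
	data := resp.MarshalProtobuf(nil)
	var decoded control.HealthCheckResponse
	if err := decoded.UnmarshalProtobuf(data); err != nil {
		panic(err)
	}

	// JSON round trip: enums are emitted by name when the value is known
	// (see NetmapStatus_name above), and UnmarshalJSON accepts both the
	// name and the numeric value.
	js, _ := decoded.MarshalJSON()
	fmt.Println(string(js))

	var fromJSON control.HealthCheckResponse
	_ = fromJSON.UnmarshalJSON([]byte(`{"body":{"netmapStatus":1},"signature":null}`))
}

+ +// UnmarshalJSON implements the json.Unmarshaler interface.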
+func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthCheckRequest struct { + Body *HealthCheckRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil) + _ json.Marshaler = (*HealthCheckRequest)(nil) + _ json.Unmarshaler = (*HealthCheckRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -38,27 +133,6 @@ func (x *HealthCheckRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -74,13 +148,176 @@ func (x *HealthCheckRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *HealthCheckRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(HealthCheckRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) { + x.Body = v +} +func (x *HealthCheckRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *HealthCheckRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *HealthCheckRequest_Body + f = new(HealthCheckRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthCheckResponse_Body struct { + NetmapStatus NetmapStatus `json:"netmapStatus"` + HealthStatus HealthStatus `json:"healthStatus"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil) + _ json.Marshaler = (*HealthCheckResponse_Body)(nil) + _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -93,27 +330,207 @@ func (x *HealthCheckResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. 
-// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.EnumMarshal(1, buf[offset:], int32(x.NetmapStatus)) - offset += proto.EnumMarshal(2, buf[offset:], int32(x.HealthStatus)) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if int32(x.NetmapStatus) != 0 { + mm.AppendInt32(1, int32(x.NetmapStatus)) + } + if int32(x.HealthStatus) != 0 { + mm.AppendInt32(2, int32(x.HealthStatus)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body") + } + switch fc.FieldNum { + case 1: // NetmapStatus + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "NetmapStatus") + } + x.NetmapStatus = NetmapStatus(data) + case 2: // HealthStatus + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "HealthStatus") + } + x.HealthStatus = HealthStatus(data) + } + } + return nil +} +func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus { + if x != nil { + return x.NetmapStatus + } + return 0 +} +func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) { + x.NetmapStatus = v +} +func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus { + if x != nil { + return x.HealthStatus + } + return 0 +} +func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) { + x.HealthStatus = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"netmapStatus\":" + out.RawString(prefix) + v := int32(x.NetmapStatus) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"healthStatus\":" + out.RawString(prefix) + v := int32(x.HealthStatus) + if vv, ok := HealthStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "netmapStatus": + { + var f NetmapStatus + var parsedValue NetmapStatus + switch v := in.Interface().(type) { + case string: + if vv, ok := NetmapStatus_value[v]; ok { + parsedValue = NetmapStatus(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = NetmapStatus(vv) + case float64: + parsedValue = NetmapStatus(v) + } + f = parsedValue + x.NetmapStatus = f + } + case "healthStatus": + { + var f HealthStatus + var parsedValue HealthStatus + switch v := in.Interface().(type) { + case string: + if vv, ok := HealthStatus_value[v]; ok { + parsedValue = HealthStatus(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = HealthStatus(vv) + case float64: + parsedValue = HealthStatus(v) + } + f = parsedValue + x.HealthStatus = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthCheckResponse struct { + Body *HealthCheckResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil) + _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil) + _ json.Marshaler = (*HealthCheckResponse)(nil) + _ json.Unmarshaler = (*HealthCheckResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -126,27 +543,6 @@ func (x *HealthCheckResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -162,13 +558,176 @@ func (x *HealthCheckResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *HealthCheckResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(HealthCheckResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) { + x.Body = v +} +func (x *HealthCheckResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *HealthCheckResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *HealthCheckResponse_Body + f = new(HealthCheckResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetNetmapStatusRequest_Body struct { + Status NetmapStatus `json:"status"` + ForceMaintenance bool `json:"forceMaintenance"` +} + +var ( + _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest_Body)(nil) + _ json.Marshaler = (*SetNetmapStatusRequest_Body)(nil) + _ json.Unmarshaler = (*SetNetmapStatusRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -181,27 +740,186 @@ func (x *SetNetmapStatusRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetNetmapStatusRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.EnumMarshal(1, buf[offset:], int32(x.Status)) - offset += proto.BoolMarshal(2, buf[offset:], x.ForceMaintenance) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if int32(x.Status) != 0 { + mm.AppendInt32(1, int32(x.Status)) + } + if x.ForceMaintenance { + mm.AppendBool(2, x.ForceMaintenance) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *SetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest_Body") + } + switch fc.FieldNum { + case 1: // Status + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Status") + } + x.Status = NetmapStatus(data) + case 2: // ForceMaintenance + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ForceMaintenance") + } + x.ForceMaintenance = data + } + } + return nil +} +func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus { + if x != nil { + return x.Status + } + return 0 +} +func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) { + x.Status = v +} +func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool { + if x != nil { + return x.ForceMaintenance + } + return false +} +func (x *SetNetmapStatusRequest_Body) SetForceMaintenance(v bool) { + x.ForceMaintenance = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"status\":" + out.RawString(prefix) + v := int32(x.Status) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"forceMaintenance\":" + out.RawString(prefix) + out.Bool(x.ForceMaintenance) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "status": + { + var f NetmapStatus + var parsedValue NetmapStatus + switch v := in.Interface().(type) { + case string: + if vv, ok := NetmapStatus_value[v]; ok { + parsedValue = NetmapStatus(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = NetmapStatus(vv) + case float64: + parsedValue = NetmapStatus(v) + } + f = parsedValue + x.Status = f + } + case "forceMaintenance": + { + var f bool + f = in.Bool() + x.ForceMaintenance = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetNetmapStatusRequest struct { + Body *SetNetmapStatusRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest)(nil) + _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest)(nil) + _ json.Marshaler = (*SetNetmapStatusRequest)(nil) + _ json.Unmarshaler = (*SetNetmapStatusRequest)(nil) +) + // StableSize returns the size of x in protobuf format. 
// // Structures with the same field values have the same binary size. @@ -214,27 +932,6 @@ func (x *SetNetmapStatusRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetNetmapStatusRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -250,13 +947,174 @@ func (x *SetNetmapStatusRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SetNetmapStatusRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SetNetmapStatusRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) { + x.Body = v +} +func (x *SetNetmapStatusRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SetNetmapStatusRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
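// Editor's note: ReadSignedData above now delegates to MarshalProtobuf, so
// the signed payload is exactly the protobuf encoding of the body; a nil
// body is safe because the nil-receiver guard in EmitProtobuf yields an
// empty payload. The helper below is an illustrative sketch only; sign is
// a placeholder for the caller's actual signature scheme.
func signSetNetmapStatusRequest(req *SetNetmapStatusRequest, sign func([]byte) *Signature) error {
	data, err := req.ReadSignedData(nil) // marshals only req.Body
	if err != nil {
		return err
	}
	req.SetSignature(sign(data))
	return nil
}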
+func (x *SetNetmapStatusRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SetNetmapStatusRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SetNetmapStatusRequest_Body + f = new(SetNetmapStatusRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetNetmapStatusResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse_Body)(nil) + _ json.Marshaler = (*SetNetmapStatusResponse_Body)(nil) + _ json.Unmarshaler = (*SetNetmapStatusResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -267,18 +1125,93 @@ func (x *SetNetmapStatusResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetNetmapStatusResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *SetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetNetmapStatusResponse struct { + Body *SetNetmapStatusResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse)(nil) + _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse)(nil) + _ json.Marshaler = (*SetNetmapStatusResponse)(nil) + _ json.Unmarshaler = (*SetNetmapStatusResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -291,27 +1224,6 @@ func (x *SetNetmapStatusResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetNetmapStatusResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -327,13 +1239,174 @@ func (x *SetNetmapStatusResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SetNetmapStatusResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
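// Editor's note: field-less bodies such as SetNetmapStatusResponse_Body
// reduce to no-ops: EmitProtobuf writes nothing and the JSON form is "{}".
// The enclosing message still appends tag 1 for a non-nil body, so (to my
// understanding of easyproto) presence survives a protobuf round trip.
// A quick hypothetical sketch:
func emptyBodySketch() {
	js, _ := new(SetNetmapStatusResponse_Body).MarshalJSON()
	_ = js // []byte("{}")

	var resp SetNetmapStatusResponse
	resp.SetBody(new(SetNetmapStatusResponse_Body))
	var out SetNetmapStatusResponse
	_ = out.UnmarshalProtobuf(resp.MarshalProtobuf(nil))
	// out.GetBody() != nil: the empty nested message is still encoded.
}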
+func (x *SetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SetNetmapStatusResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) { + x.Body = v +} +func (x *SetNetmapStatusResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SetNetmapStatusResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SetNetmapStatusResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SetNetmapStatusResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SetNetmapStatusResponse_Body + f = new(SetNetmapStatusResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNetmapStatusRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest_Body)(nil) + _ json.Marshaler = (*GetNetmapStatusRequest_Body)(nil) + _ json.Unmarshaler = (*GetNetmapStatusRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -344,18 +1417,93 @@ func (x *GetNetmapStatusRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. 
-// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetNetmapStatusRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNetmapStatusRequest struct { + Body *GetNetmapStatusRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest)(nil) + _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest)(nil) + _ json.Marshaler = (*GetNetmapStatusRequest)(nil) + _ json.Unmarshaler = (*GetNetmapStatusRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -368,27 +1516,6 @@ func (x *GetNetmapStatusRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *GetNetmapStatusRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -404,13 +1531,176 @@ func (x *GetNetmapStatusRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetNetmapStatusRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetNetmapStatusRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetNetmapStatusRequest) GetBody() *GetNetmapStatusRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetNetmapStatusRequest) SetBody(v *GetNetmapStatusRequest_Body) { + x.Body = v +} +func (x *GetNetmapStatusRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetNetmapStatusRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *GetNetmapStatusRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetNetmapStatusRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetNetmapStatusRequest_Body + f = new(GetNetmapStatusRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNetmapStatusResponse_Body struct { + Status NetmapStatus `json:"status"` + Epoch uint64 `json:"epoch"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse_Body)(nil) + _ json.Marshaler = (*GetNetmapStatusResponse_Body)(nil) + _ json.Unmarshaler = (*GetNetmapStatusResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -423,27 +1713,196 @@ func (x *GetNetmapStatusResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetNetmapStatusResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.EnumMarshal(1, buf[offset:], int32(x.Status)) - offset += proto.UInt64Marshal(2, buf[offset:], x.Epoch) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if int32(x.Status) != 0 { + mm.AppendInt32(1, int32(x.Status)) + } + if x.Epoch != 0 { + mm.AppendUint64(2, x.Epoch) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse_Body") + } + switch fc.FieldNum { + case 1: // Status + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Status") + } + x.Status = NetmapStatus(data) + case 2: // Epoch + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Epoch") + } + x.Epoch = data + } + } + return nil +} +func (x *GetNetmapStatusResponse_Body) GetStatus() NetmapStatus { + if x != nil { + return x.Status + } + return 0 +} +func (x *GetNetmapStatusResponse_Body) SetStatus(v NetmapStatus) { + x.Status = v +} +func (x *GetNetmapStatusResponse_Body) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} +func (x *GetNetmapStatusResponse_Body) SetEpoch(v uint64) { + x.Epoch = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"status\":" + out.RawString(prefix) + v := int32(x.Status) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"epoch\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "status": + { + var f NetmapStatus + var parsedValue NetmapStatus + switch v := in.Interface().(type) { + case string: + if vv, ok := NetmapStatus_value[v]; ok { + parsedValue = NetmapStatus(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = NetmapStatus(vv) + case float64: + parsedValue = NetmapStatus(v) + } + f = parsedValue + x.Status = f + } + case "epoch": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.Epoch = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNetmapStatusResponse struct { + Body *GetNetmapStatusResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse)(nil) + _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse)(nil) + _ json.Marshaler = (*GetNetmapStatusResponse)(nil) + _ json.Unmarshaler = (*GetNetmapStatusResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -456,27 +1915,6 @@ func (x *GetNetmapStatusResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetNetmapStatusResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -492,13 +1930,175 @@ func (x *GetNetmapStatusResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetNetmapStatusResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
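// Editor's note: the JSON codec above follows proto3 JSON conventions:
// uint64 fields like Epoch are written as quoted decimal strings, and the
// enum field accepts either a name from NetmapStatus_value or a bare
// number. The sketch below uses the numeric form to avoid assuming a
// specific enum value name.
func epochJSONSketch() error {
	var body GetNetmapStatusResponse_Body
	if err := body.UnmarshalJSON([]byte(`{"status":1,"epoch":"42"}`)); err != nil {
		return err
	}
	// body.GetEpoch() == 42; MarshalJSON emits "epoch":"42" (quoted) again.
	return nil
}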
+func (x *GetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetNetmapStatusResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetNetmapStatusResponse) GetBody() *GetNetmapStatusResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetNetmapStatusResponse) SetBody(v *GetNetmapStatusResponse_Body) { + x.Body = v +} +func (x *GetNetmapStatusResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetNetmapStatusResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetNetmapStatusResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetNetmapStatusResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetNetmapStatusResponse_Body + f = new(GetNetmapStatusResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DropObjectsRequest_Body struct { + AddressList [][]byte `json:"addressList"` +} + +var ( + _ encoding.ProtoMarshaler = (*DropObjectsRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*DropObjectsRequest_Body)(nil) + _ json.Marshaler = (*DropObjectsRequest_Body)(nil) + _ json.Unmarshaler = (*DropObjectsRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -510,26 +2110,155 @@ func (x *DropObjectsRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DropObjectsRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.AddressList) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DropObjectsRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DropObjectsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.AddressList { + mm.AppendBytes(1, x.AddressList[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *DropObjectsRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest_Body") + } + switch fc.FieldNum { + case 1: // AddressList + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "AddressList") + } + x.AddressList = append(x.AddressList, data) + } + } + return nil +} +func (x *DropObjectsRequest_Body) GetAddressList() [][]byte { + if x != nil { + return x.AddressList + } + return nil +} +func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) { + x.AddressList = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *DropObjectsRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"addressList\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.AddressList { + if i != 0 { + out.RawByte(',') + } + if x.AddressList[i] != nil { + out.Base64Bytes(x.AddressList[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *DropObjectsRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "addressList": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.AddressList = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DropObjectsRequest struct { + Body *DropObjectsRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*DropObjectsRequest)(nil) + _ encoding.ProtoUnmarshaler = (*DropObjectsRequest)(nil) + _ json.Marshaler = (*DropObjectsRequest)(nil) + _ json.Unmarshaler = (*DropObjectsRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -542,27 +2271,6 @@ func (x *DropObjectsRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DropObjectsRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -578,13 +2286,174 @@ func (x *DropObjectsRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *DropObjectsRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *DropObjectsRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
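// Editor's note: AddressList is a repeated bytes field, so its JSON shape
// is an array of standard-base64 strings, while the protobuf encoding
// repeats field 1 once per element. The payloads below are stand-ins for
// illustration, not real object addresses.
func addressListJSONSketch() {
	body := new(DropObjectsRequest_Body)
	body.SetAddressList([][]byte{[]byte("addr-1"), []byte("addr-2")})

	js, _ := body.MarshalJSON()
	_ = js // {"addressList":["YWRkci0x","YWRkci0y"]}
}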
+func (x *DropObjectsRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DropObjectsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *DropObjectsRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(DropObjectsRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) { + x.Body = v +} +func (x *DropObjectsRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *DropObjectsRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DropObjectsRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *DropObjectsRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DropObjectsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *DropObjectsRequest_Body + f = new(DropObjectsRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DropObjectsResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*DropObjectsResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*DropObjectsResponse_Body)(nil) + _ json.Marshaler = (*DropObjectsResponse_Body)(nil) + _ json.Unmarshaler = (*DropObjectsResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -595,18 +2464,93 @@ func (x *DropObjectsResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DropObjectsResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DropObjectsResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DropObjectsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *DropObjectsResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DropObjectsResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DropObjectsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *DropObjectsResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DropObjectsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DropObjectsResponse struct { + Body *DropObjectsResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*DropObjectsResponse)(nil) + _ encoding.ProtoUnmarshaler = (*DropObjectsResponse)(nil) + _ json.Marshaler = (*DropObjectsResponse)(nil) + _ json.Unmarshaler = (*DropObjectsResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -619,27 +2563,6 @@ func (x *DropObjectsResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DropObjectsResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -655,13 +2578,174 @@ func (x *DropObjectsResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *DropObjectsResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *DropObjectsResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DropObjectsResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DropObjectsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *DropObjectsResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(DropObjectsResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) { + x.Body = v +} +func (x *DropObjectsResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *DropObjectsResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DropObjectsResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *DropObjectsResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DropObjectsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *DropObjectsResponse_Body + f = new(DropObjectsResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsRequest_Body)(nil) + _ json.Marshaler = (*ListShardsRequest_Body)(nil) + _ json.Unmarshaler = (*ListShardsRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -672,18 +2756,93 @@ func (x *ListShardsRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. 
-// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListShardsRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsRequest struct { + Body *ListShardsRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsRequest)(nil) + _ json.Marshaler = (*ListShardsRequest)(nil) + _ json.Unmarshaler = (*ListShardsRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -696,27 +2855,6 @@ func (x *ListShardsRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListShardsRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -732,13 +2870,175 @@ func (x *ListShardsRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ListShardsRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ListShardsRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) { + x.Body = v +} +func (x *ListShardsRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsRequest_Body + f = new(ListShardsRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsResponse_Body struct { + Shards []ShardInfo `json:"shards"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsResponse_Body)(nil) + _ json.Marshaler = (*ListShardsResponse_Body)(nil) + _ json.Unmarshaler = (*ListShardsResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -747,33 +3047,155 @@ func (x *ListShardsResponse_Body) StableSize() (size int) { return 0 } for i := range x.Shards { - size += proto.NestedStructureSize(1, x.Shards[i]) + size += proto.NestedStructureSizeUnchecked(1, &x.Shards[i]) } return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListShardsResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - for i := range x.Shards { - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Shards[i]) - } - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for i := range x.Shards { + x.Shards[i].EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
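+//
+// A minimal decoding sketch, assuming only the generated types above: each
+// occurrence of field 1 appends one ShardInfo, so repeated entries accumulate
+// in wire order.
+//
+//	data := (&ListShardsResponse_Body{Shards: []ShardInfo{{}, {}}}).MarshalProtobuf(nil)
+//	var b ListShardsResponse_Body
+//	if err := b.UnmarshalProtobuf(data); err == nil {
+//		_ = len(b.GetShards()) // 2: one entry per field-1 message
+//	}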
+func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsResponse_Body") + } + switch fc.FieldNum { + case 1: // Shards + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shards") + } + x.Shards = append(x.Shards, ShardInfo{}) + ff := &x.Shards[len(x.Shards)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsResponse_Body) GetShards() []ShardInfo { + if x != nil { + return x.Shards + } + return nil +} +func (x *ListShardsResponse_Body) SetShards(v []ShardInfo) { + x.Shards = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shards\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shards { + if i != 0 { + out.RawByte(',') + } + x.Shards[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shards": + { + var f ShardInfo + var list []ShardInfo + in.Delim('[') + for !in.IsDelim(']') { + f = ShardInfo{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Shards = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsResponse struct { + Body *ListShardsResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsResponse)(nil) + _ json.Marshaler = (*ListShardsResponse)(nil) + _ json.Unmarshaler = (*ListShardsResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -786,27 +3208,6 @@ func (x *ListShardsResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *ListShardsResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -822,13 +3223,177 @@ func (x *ListShardsResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ListShardsResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ListShardsResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) { + x.Body = v +} +func (x *ListShardsResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
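+//
+// A minimal null-handling sketch, assuming only the generated types above: a
+// JSON null, top-level or per-field, is consumed and leaves the zero value.
+//
+//	var resp ListShardsResponse
+//	_ = resp.UnmarshalJSON([]byte(`{"body":null,"signature":null}`))
+//	// resp.GetBody() and resp.GetSignature() both return nil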
+func (x *ListShardsResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsResponse_Body + f = new(ListShardsResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetShardModeRequest_Body struct { + Shard_ID [][]byte `json:"shardID"` + Mode ShardMode `json:"mode"` + ResetErrorCounter bool `json:"resetErrorCounter"` +} + +var ( + _ encoding.ProtoMarshaler = (*SetShardModeRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SetShardModeRequest_Body)(nil) + _ json.Marshaler = (*SetShardModeRequest_Body)(nil) + _ json.Unmarshaler = (*SetShardModeRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -842,28 +3407,244 @@ func (x *SetShardModeRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetShardModeRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID) - offset += proto.EnumMarshal(2, buf[offset:], int32(x.Mode)) - offset += proto.BoolMarshal(3, buf[offset:], x.ResetErrorCounter) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SetShardModeRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetShardModeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } + if int32(x.Mode) != 0 { + mm.AppendInt32(2, int32(x.Mode)) + } + if x.ResetErrorCounter { + mm.AppendBool(3, x.ResetErrorCounter) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
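+//
+// A minimal decoding sketch, assuming only the generated types above: field 1
+// accumulates repeated shard IDs, field 2 carries the mode enum as int32, and
+// field 3 the reset flag.
+//
+//	data := (&SetShardModeRequest_Body{Shard_ID: [][]byte{{0x01}}, ResetErrorCounter: true}).MarshalProtobuf(nil)
+//	var b SetShardModeRequest_Body
+//	_ = b.UnmarshalProtobuf(data) // b.GetShard_ID() has one entry; b.GetResetErrorCounter() is true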
+func (x *SetShardModeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + case 2: // Mode + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Mode") + } + x.Mode = ShardMode(data) + case 3: // ResetErrorCounter + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ResetErrorCounter") + } + x.ResetErrorCounter = data + } + } + return nil +} +func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *SetShardModeRequest_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} +func (x *SetShardModeRequest_Body) GetMode() ShardMode { + if x != nil { + return x.Mode + } + return 0 +} +func (x *SetShardModeRequest_Body) SetMode(v ShardMode) { + x.Mode = v +} +func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool { + if x != nil { + return x.ResetErrorCounter + } + return false +} +func (x *SetShardModeRequest_Body) SetResetErrorCounter(v bool) { + x.ResetErrorCounter = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SetShardModeRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"mode\":" + out.RawString(prefix) + v := int32(x.Mode) + if vv, ok := ShardMode_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"resetErrorCounter\":" + out.RawString(prefix) + out.Bool(x.ResetErrorCounter) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
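+//
+// A minimal decoding sketch, assuming only the generated types above:
+// "shardID" elements are base64 strings, and "mode" accepts either a name
+// from ShardMode_value or a number.
+//
+//	var b SetShardModeRequest_Body
+//	err := b.UnmarshalJSON([]byte(`{"shardID":["c2hhcmQ="],"mode":1,"resetErrorCounter":true}`))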
+func (x *SetShardModeRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + case "mode": + { + var f ShardMode + var parsedValue ShardMode + switch v := in.Interface().(type) { + case string: + if vv, ok := ShardMode_value[v]; ok { + parsedValue = ShardMode(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = ShardMode(vv) + case float64: + parsedValue = ShardMode(v) + } + f = parsedValue + x.Mode = f + } + case "resetErrorCounter": + { + var f bool + f = in.Bool() + x.ResetErrorCounter = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetShardModeRequest struct { + Body *SetShardModeRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SetShardModeRequest)(nil) + _ encoding.ProtoUnmarshaler = (*SetShardModeRequest)(nil) + _ json.Marshaler = (*SetShardModeRequest)(nil) + _ json.Unmarshaler = (*SetShardModeRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -876,27 +3657,6 @@ func (x *SetShardModeRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetShardModeRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -912,13 +3672,174 @@ func (x *SetShardModeRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SetShardModeRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SetShardModeRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
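+//
+// A minimal sketch of the append contract, assuming only the generated types
+// above: the pooled marshaler appends to dst, so callers may pass nil or
+// reuse a buffer across calls.
+//
+//	buf := make([]byte, 0, 64)
+//	buf = (&SetShardModeRequest{}).MarshalProtobuf(buf)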
+func (x *SetShardModeRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetShardModeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SetShardModeRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SetShardModeRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) { + x.Body = v +} +func (x *SetShardModeRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SetShardModeRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SetShardModeRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *SetShardModeRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetShardModeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SetShardModeRequest_Body + f = new(SetShardModeRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetShardModeResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*SetShardModeResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SetShardModeResponse_Body)(nil) + _ json.Marshaler = (*SetShardModeResponse_Body)(nil) + _ json.Unmarshaler = (*SetShardModeResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -929,18 +3850,93 @@ func (x *SetShardModeResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetShardModeResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SetShardModeResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetShardModeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SetShardModeResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SetShardModeResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetShardModeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *SetShardModeResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetShardModeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SetShardModeResponse struct { + Body *SetShardModeResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SetShardModeResponse)(nil) + _ encoding.ProtoUnmarshaler = (*SetShardModeResponse)(nil) + _ json.Marshaler = (*SetShardModeResponse)(nil) + _ json.Unmarshaler = (*SetShardModeResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -953,27 +3949,6 @@ func (x *SetShardModeResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SetShardModeResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -989,13 +3964,177 @@ func (x *SetShardModeResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SetShardModeResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SetShardModeResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SetShardModeResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SetShardModeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *SetShardModeResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SetShardModeResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) { + x.Body = v +} +func (x *SetShardModeResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SetShardModeResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SetShardModeResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SetShardModeResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SetShardModeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SetShardModeResponse_Body + f = new(SetShardModeResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SynchronizeTreeRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + Height uint64 `json:"height"` +} + +var ( + _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest_Body)(nil) + _ json.Marshaler = (*SynchronizeTreeRequest_Body)(nil) + _ json.Unmarshaler = (*SynchronizeTreeRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
@@ -1009,28 +4148,219 @@ func (x *SynchronizeTreeRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SynchronizeTreeRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.UInt64Marshal(3, buf[offset:], x.Height) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SynchronizeTreeRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SynchronizeTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if x.Height != 0 { + mm.AppendUint64(3, x.Height) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SynchronizeTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // Height + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Height") + } + x.Height = data + } + } + return nil +} +func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *SynchronizeTreeRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *SynchronizeTreeRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *SynchronizeTreeRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} +func (x *SynchronizeTreeRequest_Body) SetHeight(v uint64) { + x.Height = v +} + +// MarshalJSON implements the json.Marshaler interface. 
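+//
+// A minimal output-shape sketch, assuming only the generated types above and
+// a hypothetical tree ID: Height follows the proto3 JSON convention for
+// uint64 and is written as a quoted decimal string.
+//
+//	b := SynchronizeTreeRequest_Body{TreeId: "version", Height: 42}
+//	data, _ := b.MarshalJSON()
+//	// {"containerId":"","treeId":"version","height":"42"}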
+func (x *SynchronizeTreeRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"height\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SynchronizeTreeRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "height": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.Height = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SynchronizeTreeRequest struct { + Body *SynchronizeTreeRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest)(nil) + _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest)(nil) + _ json.Marshaler = (*SynchronizeTreeRequest)(nil) + _ json.Unmarshaler = (*SynchronizeTreeRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1043,27 +4373,6 @@ func (x *SynchronizeTreeRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SynchronizeTreeRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -1079,13 +4388,174 @@ func (x *SynchronizeTreeRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SynchronizeTreeRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SynchronizeTreeRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SynchronizeTreeRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SynchronizeTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SynchronizeTreeRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SynchronizeTreeRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) { + x.Body = v +} +func (x *SynchronizeTreeRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SynchronizeTreeRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SynchronizeTreeRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *SynchronizeTreeRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SynchronizeTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SynchronizeTreeRequest_Body + f = new(SynchronizeTreeRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SynchronizeTreeResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse_Body)(nil) + _ json.Marshaler = (*SynchronizeTreeResponse_Body)(nil) + _ json.Unmarshaler = (*SynchronizeTreeResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1096,18 +4566,93 @@ func (x *SynchronizeTreeResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SynchronizeTreeResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SynchronizeTreeResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SynchronizeTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SynchronizeTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SynchronizeTreeResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SynchronizeTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *SynchronizeTreeResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SynchronizeTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SynchronizeTreeResponse struct { + Body *SynchronizeTreeResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse)(nil) + _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse)(nil) + _ json.Marshaler = (*SynchronizeTreeResponse)(nil) + _ json.Unmarshaler = (*SynchronizeTreeResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1120,27 +4665,6 @@ func (x *SynchronizeTreeResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SynchronizeTreeResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1156,13 +4680,176 @@ func (x *SynchronizeTreeResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SynchronizeTreeResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SynchronizeTreeResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SynchronizeTreeResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SynchronizeTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *SynchronizeTreeResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SynchronizeTreeResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) { + x.Body = v +} +func (x *SynchronizeTreeResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SynchronizeTreeResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SynchronizeTreeResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SynchronizeTreeResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SynchronizeTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SynchronizeTreeResponse_Body + f = new(SynchronizeTreeResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type EvacuateShardRequest_Body struct { + Shard_ID [][]byte `json:"shardID"` + IgnoreErrors bool `json:"ignoreErrors"` +} + +var ( + _ encoding.ProtoMarshaler = (*EvacuateShardRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest_Body)(nil) + _ json.Marshaler = (*EvacuateShardRequest_Body)(nil) + _ json.Unmarshaler = (*EvacuateShardRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
@@ -1175,27 +4862,189 @@ func (x *EvacuateShardRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *EvacuateShardRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID) - offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *EvacuateShardRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *EvacuateShardRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } + if x.IgnoreErrors { + mm.AppendBool(2, x.IgnoreErrors) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *EvacuateShardRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + case 2: // IgnoreErrors + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors") + } + x.IgnoreErrors = data + } + } + return nil +} +func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *EvacuateShardRequest_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} +func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool { + if x != nil { + return x.IgnoreErrors + } + return false +} +func (x *EvacuateShardRequest_Body) SetIgnoreErrors(v bool) { + x.IgnoreErrors = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *EvacuateShardRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ignoreErrors\":" + out.RawString(prefix) + out.Bool(x.IgnoreErrors) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
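+//
+// The accepted document mirrors the MarshalEasyJSON output above: shard IDs
+// are base64 strings, and empty strings decode to nil entries. An
+// illustrative input (values are made up):
+//
+//	{"shardID":["c2hhcmQtMQ=="],"ignoreErrors":true}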
+func (x *EvacuateShardRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + case "ignoreErrors": + { + var f bool + f = in.Bool() + x.IgnoreErrors = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type EvacuateShardRequest struct { + Body *EvacuateShardRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*EvacuateShardRequest)(nil) + _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest)(nil) + _ json.Marshaler = (*EvacuateShardRequest)(nil) + _ json.Unmarshaler = (*EvacuateShardRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1208,27 +5057,6 @@ func (x *EvacuateShardRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *EvacuateShardRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1244,13 +5072,175 @@ func (x *EvacuateShardRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *EvacuateShardRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *EvacuateShardRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *EvacuateShardRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *EvacuateShardRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
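+//
+// A hedged server-side sketch: decode the raw request, then re-derive the
+// payload the client signed; verifySignature is a hypothetical helper, not
+// part of this package:
+//
+//	var req EvacuateShardRequest
+//	if err := req.UnmarshalProtobuf(raw); err != nil {
+//		return err
+//	}
+//	payload, _ := req.ReadSignedData(nil)
+//	ok := verifySignature(payload, req.GetSignature()) // hypothetical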
+func (x *EvacuateShardRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(EvacuateShardRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *EvacuateShardRequest) SetBody(v *EvacuateShardRequest_Body) { + x.Body = v +} +func (x *EvacuateShardRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *EvacuateShardRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *EvacuateShardRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *EvacuateShardRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *EvacuateShardRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *EvacuateShardRequest_Body + f = new(EvacuateShardRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type EvacuateShardResponse_Body struct { + Count uint32 `json:"count"` +} + +var ( + _ encoding.ProtoMarshaler = (*EvacuateShardResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse_Body)(nil) + _ json.Marshaler = (*EvacuateShardResponse_Body)(nil) + _ json.Unmarshaler = (*EvacuateShardResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1262,26 +5252,139 @@ func (x *EvacuateShardResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. 
-// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *EvacuateShardResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt32Marshal(1, buf[offset:], x.Count) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *EvacuateShardResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *EvacuateShardResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Count != 0 { + mm.AppendUint32(1, x.Count) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *EvacuateShardResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse_Body") + } + switch fc.FieldNum { + case 1: // Count + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Count") + } + x.Count = data + } + } + return nil +} +func (x *EvacuateShardResponse_Body) GetCount() uint32 { + if x != nil { + return x.Count + } + return 0 +} +func (x *EvacuateShardResponse_Body) SetCount(v uint32) { + x.Count = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *EvacuateShardResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"count\":" + out.RawString(prefix) + out.Uint32(x.Count) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
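+//
+// "count" is decoded via jlexer.JsonNumber and strconv.ParseUint with a
+// 32-bit limit, so an out-of-range value such as
+//
+//	{"count":4294967296}
+//
+// is reported through in.AddError rather than silently truncated.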
+func (x *EvacuateShardResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "count": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Count = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type EvacuateShardResponse struct { + Body *EvacuateShardResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*EvacuateShardResponse)(nil) + _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse)(nil) + _ json.Marshaler = (*EvacuateShardResponse)(nil) + _ json.Unmarshaler = (*EvacuateShardResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1294,27 +5397,6 @@ func (x *EvacuateShardResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *EvacuateShardResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1330,13 +5412,176 @@ func (x *EvacuateShardResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *EvacuateShardResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *EvacuateShardResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *EvacuateShardResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *EvacuateShardResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
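+//
+// Field numbers other than 1 and 2 are still consumed by
+// easyproto.FieldContext.NextField and then fall through the switch below,
+// so unknown fields added by newer peers are skipped rather than rejected.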
+func (x *EvacuateShardResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(EvacuateShardResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *EvacuateShardResponse) SetBody(v *EvacuateShardResponse_Body) { + x.Body = v +} +func (x *EvacuateShardResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *EvacuateShardResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *EvacuateShardResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *EvacuateShardResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *EvacuateShardResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *EvacuateShardResponse_Body + f = new(EvacuateShardResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type FlushCacheRequest_Body struct { + Shard_ID [][]byte `json:"shardID"` + Seal bool `json:"seal"` +} + +var ( + _ encoding.ProtoMarshaler = (*FlushCacheRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*FlushCacheRequest_Body)(nil) + _ json.Marshaler = (*FlushCacheRequest_Body)(nil) + _ json.Unmarshaler = (*FlushCacheRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1349,27 +5594,189 @@ func (x *FlushCacheRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. 
-// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *FlushCacheRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID) - offset += proto.BoolMarshal(2, buf[offset:], x.Seal) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *FlushCacheRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *FlushCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } + if x.Seal { + mm.AppendBool(2, x.Seal) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *FlushCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + case 2: // Seal + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Seal") + } + x.Seal = data + } + } + return nil +} +func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *FlushCacheRequest_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} +func (x *FlushCacheRequest_Body) GetSeal() bool { + if x != nil { + return x.Seal + } + return false +} +func (x *FlushCacheRequest_Body) SetSeal(v bool) { + x.Seal = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *FlushCacheRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"seal\":" + out.RawString(prefix) + out.Bool(x.Seal) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
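+//
+// An illustrative accepted document (values are made up). Empty-string
+// entries in "shardID" decode to nil elements, mirroring the ""-for-nil
+// convention used by MarshalEasyJSON above:
+//
+//	{"shardID":["c2hhcmQtMQ==",""],"seal":true}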
+func (x *FlushCacheRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + case "seal": + { + var f bool + f = in.Bool() + x.Seal = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type FlushCacheRequest struct { + Body *FlushCacheRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*FlushCacheRequest)(nil) + _ encoding.ProtoUnmarshaler = (*FlushCacheRequest)(nil) + _ json.Marshaler = (*FlushCacheRequest)(nil) + _ json.Unmarshaler = (*FlushCacheRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1382,27 +5789,6 @@ func (x *FlushCacheRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *FlushCacheRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1418,13 +5804,174 @@ func (x *FlushCacheRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *FlushCacheRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *FlushCacheRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *FlushCacheRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *FlushCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *FlushCacheRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(FlushCacheRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *FlushCacheRequest) SetBody(v *FlushCacheRequest_Body) { + x.Body = v +} +func (x *FlushCacheRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *FlushCacheRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *FlushCacheRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *FlushCacheRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *FlushCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *FlushCacheRequest_Body + f = new(FlushCacheRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type FlushCacheResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*FlushCacheResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*FlushCacheResponse_Body)(nil) + _ json.Marshaler = (*FlushCacheResponse_Body)(nil) + _ json.Unmarshaler = (*FlushCacheResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1435,18 +5982,93 @@ func (x *FlushCacheResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. 
-// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *FlushCacheResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *FlushCacheResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *FlushCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *FlushCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *FlushCacheResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *FlushCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *FlushCacheResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *FlushCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type FlushCacheResponse struct { + Body *FlushCacheResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*FlushCacheResponse)(nil) + _ encoding.ProtoUnmarshaler = (*FlushCacheResponse)(nil) + _ json.Marshaler = (*FlushCacheResponse)(nil) + _ json.Unmarshaler = (*FlushCacheResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1459,27 +6081,6 @@ func (x *FlushCacheResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *FlushCacheResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -1495,13 +6096,176 @@ func (x *FlushCacheResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *FlushCacheResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *FlushCacheResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *FlushCacheResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *FlushCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *FlushCacheResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(FlushCacheResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *FlushCacheResponse) SetBody(v *FlushCacheResponse_Body) { + x.Body = v +} +func (x *FlushCacheResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *FlushCacheResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *FlushCacheResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
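+//
+// Because the body type carries no fields, decoding accepts documents such
+// as {"body":{},"signature":null}; a top-level JSON null is likewise
+// consumed without error and leaves the receiver zero-valued.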
+func (x *FlushCacheResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *FlushCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *FlushCacheResponse_Body + f = new(FlushCacheResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DoctorRequest_Body struct { + Concurrency uint32 `json:"concurrency"` + RemoveDuplicates bool `json:"removeDuplicates"` +} + +var ( + _ encoding.ProtoMarshaler = (*DoctorRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*DoctorRequest_Body)(nil) + _ json.Marshaler = (*DoctorRequest_Body)(nil) + _ json.Unmarshaler = (*DoctorRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1514,27 +6278,173 @@ func (x *DoctorRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DoctorRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt32Marshal(1, buf[offset:], x.Concurrency) - offset += proto.BoolMarshal(2, buf[offset:], x.RemoveDuplicates) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DoctorRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DoctorRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Concurrency != 0 { + mm.AppendUint32(1, x.Concurrency) + } + if x.RemoveDuplicates { + mm.AppendBool(2, x.RemoveDuplicates) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
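+//
+// EmitProtobuf above follows proto3 presence rules: a zero Concurrency and
+// a false RemoveDuplicates are not written at all, so in this sketch the
+// resulting payload is empty:
+//
+//	b := (&DoctorRequest_Body{}).MarshalProtobuf(nil)
+//	// len(b) == 0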
+func (x *DoctorRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DoctorRequest_Body") + } + switch fc.FieldNum { + case 1: // Concurrency + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Concurrency") + } + x.Concurrency = data + case 2: // RemoveDuplicates + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "RemoveDuplicates") + } + x.RemoveDuplicates = data + } + } + return nil +} +func (x *DoctorRequest_Body) GetConcurrency() uint32 { + if x != nil { + return x.Concurrency + } + return 0 +} +func (x *DoctorRequest_Body) SetConcurrency(v uint32) { + x.Concurrency = v +} +func (x *DoctorRequest_Body) GetRemoveDuplicates() bool { + if x != nil { + return x.RemoveDuplicates + } + return false +} +func (x *DoctorRequest_Body) SetRemoveDuplicates(v bool) { + x.RemoveDuplicates = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DoctorRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"concurrency\":" + out.RawString(prefix) + out.Uint32(x.Concurrency) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"removeDuplicates\":" + out.RawString(prefix) + out.Bool(x.RemoveDuplicates) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *DoctorRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "concurrency": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Concurrency = f + } + case "removeDuplicates": + { + var f bool + f = in.Bool() + x.RemoveDuplicates = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DoctorRequest struct { + Body *DoctorRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*DoctorRequest)(nil) + _ encoding.ProtoUnmarshaler = (*DoctorRequest)(nil) + _ json.Marshaler = (*DoctorRequest)(nil) + _ json.Unmarshaler = (*DoctorRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1547,27 +6457,6 @@ func (x *DoctorRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. 
-// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DoctorRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1583,13 +6472,174 @@ func (x *DoctorRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *DoctorRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *DoctorRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DoctorRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DoctorRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *DoctorRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DoctorRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(DoctorRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *DoctorRequest) GetBody() *DoctorRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *DoctorRequest) SetBody(v *DoctorRequest_Body) { + x.Body = v +} +func (x *DoctorRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *DoctorRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DoctorRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
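+//
+// A minimal decoding sketch; the field values are illustrative only, and an
+// absent "signature" key simply leaves that field nil:
+//
+//	var req DoctorRequest
+//	raw := []byte(`{"body":{"concurrency":4,"removeDuplicates":true}}`)
+//	if err := req.UnmarshalJSON(raw); err != nil {
+//		// malformed input
+//	}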
+func (x *DoctorRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DoctorRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *DoctorRequest_Body + f = new(DoctorRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DoctorResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*DoctorResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*DoctorResponse_Body)(nil) + _ json.Marshaler = (*DoctorResponse_Body)(nil) + _ json.Unmarshaler = (*DoctorResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1600,18 +6650,93 @@ func (x *DoctorResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DoctorResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DoctorResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DoctorResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *DoctorResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DoctorResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DoctorResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DoctorResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *DoctorResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DoctorResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DoctorResponse struct { + Body *DoctorResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*DoctorResponse)(nil) + _ encoding.ProtoUnmarshaler = (*DoctorResponse)(nil) + _ json.Marshaler = (*DoctorResponse)(nil) + _ json.Unmarshaler = (*DoctorResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1624,27 +6749,6 @@ func (x *DoctorResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DoctorResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1660,13 +6764,215 @@ func (x *DoctorResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *DoctorResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *DoctorResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DoctorResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DoctorResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *DoctorResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DoctorResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(DoctorResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *DoctorResponse) GetBody() *DoctorResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *DoctorResponse) SetBody(v *DoctorResponse_Body) { + x.Body = v +} +func (x *DoctorResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *DoctorResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DoctorResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *DoctorResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DoctorResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *DoctorResponse_Body + f = new(DoctorResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardEvacuationRequest_Body_Scope int32 + +const ( + StartShardEvacuationRequest_Body_NONE StartShardEvacuationRequest_Body_Scope = 0 + StartShardEvacuationRequest_Body_OBJECTS StartShardEvacuationRequest_Body_Scope = 1 + StartShardEvacuationRequest_Body_TREES StartShardEvacuationRequest_Body_Scope = 2 +) + +var ( + StartShardEvacuationRequest_Body_Scope_name = map[int32]string{ + 0: "NONE", + 1: "OBJECTS", + 2: "TREES", + } + StartShardEvacuationRequest_Body_Scope_value = map[string]int32{ + "NONE": 0, + "OBJECTS": 1, + "TREES": 2, + } +) + +func (x StartShardEvacuationRequest_Body_Scope) String() string { + if v, ok := StartShardEvacuationRequest_Body_Scope_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool { + if v, ok := StartShardEvacuationRequest_Body_Scope_value[s]; ok { + *x = StartShardEvacuationRequest_Body_Scope(v) + return true + } + return false +} + +type StartShardEvacuationRequest_Body struct { + Shard_ID [][]byte `json:"shardID"` + IgnoreErrors bool `json:"ignoreErrors"` + Scope uint32 `json:"scope"` + ContainerWorkerCount uint32 `json:"containerWorkerCount"` + ObjectWorkerCount uint32 `json:"objectWorkerCount"` + RepOneOnly bool `json:"repOneOnly"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest_Body)(nil) + _ json.Marshaler = (*StartShardEvacuationRequest_Body)(nil) + _ json.Unmarshaler = (*StartShardEvacuationRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1677,31 +6983,355 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) { size += proto.RepeatedBytesSize(1, x.Shard_ID) size += proto.BoolSize(2, x.IgnoreErrors) size += proto.UInt32Size(3, x.Scope) + size += proto.UInt32Size(4, x.ContainerWorkerCount) + size += proto.UInt32Size(5, x.ObjectWorkerCount) + size += proto.BoolSize(6, x.RepOneOnly) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *StartShardEvacuationRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID) - offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors) - offset += proto.UInt32Marshal(3, buf[offset:], x.Scope) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } + if x.IgnoreErrors { + mm.AppendBool(2, x.IgnoreErrors) + } + if x.Scope != 0 { + mm.AppendUint32(3, x.Scope) + } + if x.ContainerWorkerCount != 0 { + mm.AppendUint32(4, x.ContainerWorkerCount) + } + if x.ObjectWorkerCount != 0 { + mm.AppendUint32(5, x.ObjectWorkerCount) + } + if x.RepOneOnly { + mm.AppendBool(6, x.RepOneOnly) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + case 2: // IgnoreErrors + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors") + } + x.IgnoreErrors = data + case 3: // Scope + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Scope") + } + x.Scope = data + case 4: // ContainerWorkerCount + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount") + } + x.ContainerWorkerCount = data + case 5: // ObjectWorkerCount + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount") + } + x.ObjectWorkerCount = data + case 6: // RepOneOnly + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly") + } + x.RepOneOnly = data + } + } + return nil +} +func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *StartShardEvacuationRequest_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} +func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool { + if x != nil { + return x.IgnoreErrors + } + return false +} +func (x *StartShardEvacuationRequest_Body) SetIgnoreErrors(v bool) { + x.IgnoreErrors = v +} +func (x *StartShardEvacuationRequest_Body) GetScope() uint32 { + if x != nil { + return x.Scope + } + return 0 +} +func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) { + x.Scope = v +} +func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 { + if x != nil { + return x.ContainerWorkerCount + } + return 0 +} +func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) { + x.ContainerWorkerCount = v +} +func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 { + if x != nil 
{ + return x.ObjectWorkerCount + } + return 0 +} +func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) { + x.ObjectWorkerCount = v +} +func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool { + if x != nil { + return x.RepOneOnly + } + return false +} +func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) { + x.RepOneOnly = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ignoreErrors\":" + out.RawString(prefix) + out.Bool(x.IgnoreErrors) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"scope\":" + out.RawString(prefix) + out.Uint32(x.Scope) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerWorkerCount\":" + out.RawString(prefix) + out.Uint32(x.ContainerWorkerCount) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"objectWorkerCount\":" + out.RawString(prefix) + out.Uint32(x.ObjectWorkerCount) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"repOneOnly\":" + out.RawString(prefix) + out.Bool(x.RepOneOnly) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *StartShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + case "ignoreErrors": + { + var f bool + f = in.Bool() + x.IgnoreErrors = f + } + case "scope": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Scope = f + } + case "containerWorkerCount": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.ContainerWorkerCount = f + } + case "objectWorkerCount": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.ObjectWorkerCount = f + } + case "repOneOnly": + { + var f bool + f = in.Bool() + x.RepOneOnly = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardEvacuationRequest struct { + Body *StartShardEvacuationRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest)(nil) + _ json.Marshaler = (*StartShardEvacuationRequest)(nil) + _ json.Unmarshaler = (*StartShardEvacuationRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1714,27 +7344,6 @@ func (x *StartShardEvacuationRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *StartShardEvacuationRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1750,13 +7359,174 @@ func (x *StartShardEvacuationRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. 
func (x *StartShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *StartShardEvacuationRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StartShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StartShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(StartShardEvacuationRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *StartShardEvacuationRequest) SetBody(v *StartShardEvacuationRequest_Body) { + x.Body = v +} +func (x *StartShardEvacuationRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *StartShardEvacuationRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StartShardEvacuationRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *StartShardEvacuationRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *StartShardEvacuationRequest_Body + f = new(StartShardEvacuationRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardEvacuationResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse_Body)(nil) + _ json.Marshaler = (*StartShardEvacuationResponse_Body)(nil) + _ json.Unmarshaler = (*StartShardEvacuationResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1767,18 +7537,93 @@ func (x *StartShardEvacuationResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *StartShardEvacuationResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StartShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StartShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StartShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *StartShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardEvacuationResponse struct { + Body *StartShardEvacuationResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse)(nil) + _ json.Marshaler = (*StartShardEvacuationResponse)(nil) + _ json.Unmarshaler = (*StartShardEvacuationResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1791,27 +7636,6 @@ func (x *StartShardEvacuationResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *StartShardEvacuationResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1827,13 +7651,174 @@ func (x *StartShardEvacuationResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *StartShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *StartShardEvacuationResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StartShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *StartShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(StartShardEvacuationResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *StartShardEvacuationResponse) SetBody(v *StartShardEvacuationResponse_Body) { + x.Body = v +} +func (x *StartShardEvacuationResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *StartShardEvacuationResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StartShardEvacuationResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *StartShardEvacuationResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *StartShardEvacuationResponse_Body + f = new(StartShardEvacuationResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetShardEvacuationStatusRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil) + _ json.Marshaler = (*GetShardEvacuationStatusRequest_Body)(nil) + _ json.Unmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
@@ -1844,18 +7829,93 @@ func (x *GetShardEvacuationStatusRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetShardEvacuationStatusRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetShardEvacuationStatusRequest struct { + Body *GetShardEvacuationStatusRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest)(nil) + _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest)(nil) + _ json.Marshaler = (*GetShardEvacuationStatusRequest)(nil) + _ json.Unmarshaler = (*GetShardEvacuationStatusRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1868,27 +7928,6 @@ func (x *GetShardEvacuationStatusRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *GetShardEvacuationStatusRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1904,13 +7943,210 @@ func (x *GetShardEvacuationStatusRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetShardEvacuationStatusRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetShardEvacuationStatusRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetShardEvacuationStatusRequest) SetBody(v *GetShardEvacuationStatusRequest_Body) { + x.Body = v +} +func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetShardEvacuationStatusRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *GetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetShardEvacuationStatusRequest_Body + f = new(GetShardEvacuationStatusRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetShardEvacuationStatusResponse_Body_Status int32 + +const ( + GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0 + GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1 + GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2 +) + +var ( + GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{ + 0: "EVACUATE_SHARD_STATUS_UNDEFINED", + 1: "RUNNING", + 2: "COMPLETED", + } + GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{ + "EVACUATE_SHARD_STATUS_UNDEFINED": 0, + "RUNNING": 1, + "COMPLETED": 2, + } +) + +func (x GetShardEvacuationStatusResponse_Body_Status) String() string { + if v, ok := GetShardEvacuationStatusResponse_Body_Status_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *GetShardEvacuationStatusResponse_Body_Status) FromString(s string) bool { + if v, ok := GetShardEvacuationStatusResponse_Body_Status_value[s]; ok { + *x = GetShardEvacuationStatusResponse_Body_Status(v) + return true + } + return false +} + +type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct { + Value int64 `json:"value"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) + _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) + _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) + _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
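Reviewer note (not part of the diff): the `Status` enum above follows the same self-contained pattern as `Scope` earlier in the file — paired name/value maps, a `String()` that degrades to the decimal value for codes this build does not know, and a `FromString()` that reports whether the name was recognized. A stripped-down restatement with toy names, nothing here is generated:

```go
package main

import (
	"fmt"
	"strconv"
)

// Scope mirrors the generated enum pattern: an int32 newtype with
// explicit lookup maps instead of the protobuf reflection registry.
type Scope int32

const (
	ScopeNone    Scope = 0
	ScopeObjects Scope = 1
	ScopeTrees   Scope = 2
)

var (
	scopeName  = map[int32]string{0: "NONE", 1: "OBJECTS", 2: "TREES"}
	scopeValue = map[string]int32{"NONE": 0, "OBJECTS": 1, "TREES": 2}
)

// String falls back to the numeric form, so values received from a
// newer peer still print and round-trip instead of being lost.
func (x Scope) String() string {
	if v, ok := scopeName[int32(x)]; ok {
		return v
	}
	return strconv.FormatInt(int64(x), 10)
}

func (x *Scope) FromString(s string) bool {
	if v, ok := scopeValue[s]; ok {
		*x = Scope(v)
		return true
	}
	return false
}

func main() {
	fmt.Println(ScopeTrees) // TREES
	fmt.Println(Scope(42))  // 42

	var s Scope
	ok := s.FromString("OBJECTS")
	fmt.Println(ok, s) // true OBJECTS
}
```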
@@ -1922,26 +8158,140 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableSize() (size return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.Int64Marshal(1, buf[offset:], x.Value) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Value != 0 { + mm.AppendInt64(1, x.Value) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_UnixTimestamp") + } + switch fc.FieldNum { + case 1: // Value + data, ok := fc.Int64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Value") + } + x.Value = data + } + } + return nil +} +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) SetValue(v int64) { + x.Value = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"value\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Value, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "value": + { + var f int64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseInt(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := int64(v) + f = pv + x.Value = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetShardEvacuationStatusResponse_Body_Duration struct { + Seconds int64 `json:"seconds"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) + _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) + _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) + _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1953,26 +8303,151 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) StableSize() (size int) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetShardEvacuationStatusResponse_Body_Duration) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.Int64Marshal(1, buf[offset:], x.Seconds) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetShardEvacuationStatusResponse_Body_Duration) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Seconds != 0 { + mm.AppendInt64(1, x.Seconds) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_Duration") + } + switch fc.FieldNum { + case 1: // Seconds + data, ok := fc.Int64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Seconds") + } + x.Seconds = data + } + } + return nil +} +func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body_Duration) SetSeconds(v int64) { + x.Seconds = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"seconds\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Seconds, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "seconds": + { + var f int64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseInt(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := int64(v) + f = pv + x.Seconds = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetShardEvacuationStatusResponse_Body struct { + TotalObjects uint64 `json:"totalObjects"` + EvacuatedObjects uint64 `json:"evacuatedObjects"` + FailedObjects uint64 `json:"failedObjects"` + Shard_ID [][]byte `json:"shardID"` + Status GetShardEvacuationStatusResponse_Body_Status `json:"status"` + Duration *GetShardEvacuationStatusResponse_Body_Duration `json:"duration"` + StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `json:"startedAt"` + ErrorMessage string `json:"errorMessage"` + SkippedObjects uint64 `json:"skippedObjects"` + TotalTrees uint64 `json:"totalTrees"` + EvacuatedTrees uint64 `json:"evacuatedTrees"` + FailedTrees uint64 `json:"failedTrees"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil) + _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body)(nil) + _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
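Reviewer note (not part of the diff): the easyjson writers above quote 64-bit integers (`"seconds":"123"`) while the readers go through `in.JsonNumber()`, which accepts either quoted or bare digits. This matches the protobuf JSON mapping, where int64/uint64 are encoded as strings so that consumers limited to IEEE-754 doubles (JavaScript among them) do not silently lose precision above 2^53. A self-contained sketch of the same convention using only the standard library, with an invented `Timestamp` type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// Timestamp demonstrates the quoted-int64 convention: the value is
// written as a JSON string and parsed back from one.
type Timestamp struct {
	Value int64
}

func (t Timestamp) MarshalJSON() ([]byte, error) {
	return []byte(`"` + strconv.FormatInt(t.Value, 10) + `"`), nil
}

func (t *Timestamp) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	v, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return err
	}
	t.Value = v
	return nil
}

func main() {
	// 1<<60 exceeds 2^53, so an unquoted JSON number would be unsafe
	// for double-based parsers; the string form survives intact.
	b, _ := json.Marshal(Timestamp{Value: 1 << 60})
	fmt.Println(string(b)) // "1152921504606846976"

	var t Timestamp
	_ = json.Unmarshal(b, &t)
	fmt.Println(t.Value) // 1152921504606846976
}
```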
@@ -1995,37 +8470,628 @@ func (x *GetShardEvacuationStatusResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetShardEvacuationStatusResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt64Marshal(1, buf[offset:], x.TotalObjects) - offset += proto.UInt64Marshal(2, buf[offset:], x.EvacuatedObjects) - offset += proto.UInt64Marshal(3, buf[offset:], x.FailedObjects) - offset += proto.RepeatedBytesMarshal(4, buf[offset:], x.Shard_ID) - offset += proto.EnumMarshal(5, buf[offset:], int32(x.Status)) - offset += proto.NestedStructureMarshal(6, buf[offset:], x.Duration) - offset += proto.NestedStructureMarshal(7, buf[offset:], x.StartedAt) - offset += proto.StringMarshal(8, buf[offset:], x.ErrorMessage) - offset += proto.UInt64Marshal(9, buf[offset:], x.SkippedObjects) - offset += proto.UInt64Marshal(10, buf[offset:], x.TotalTrees) - offset += proto.UInt64Marshal(11, buf[offset:], x.EvacuatedTrees) - offset += proto.UInt64Marshal(12, buf[offset:], x.FailedTrees) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.TotalObjects != 0 { + mm.AppendUint64(1, x.TotalObjects) + } + if x.EvacuatedObjects != 0 { + mm.AppendUint64(2, x.EvacuatedObjects) + } + if x.FailedObjects != 0 { + mm.AppendUint64(3, x.FailedObjects) + } + for j := range x.Shard_ID { + mm.AppendBytes(4, x.Shard_ID[j]) + } + if int32(x.Status) != 0 { + mm.AppendInt32(5, int32(x.Status)) + } + if x.Duration != nil { + x.Duration.EmitProtobuf(mm.AppendMessage(6)) + } + if x.StartedAt != nil { + x.StartedAt.EmitProtobuf(mm.AppendMessage(7)) + } + if len(x.ErrorMessage) != 0 { + mm.AppendString(8, x.ErrorMessage) + } + if x.SkippedObjects != 0 { + mm.AppendUint64(9, x.SkippedObjects) + } + if x.TotalTrees != 0 { + mm.AppendUint64(10, x.TotalTrees) + } + if x.EvacuatedTrees != 0 { + mm.AppendUint64(11, x.EvacuatedTrees) + } + if x.FailedTrees != 0 { + mm.AppendUint64(12, x.FailedTrees) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body") + } + switch fc.FieldNum { + case 1: // TotalObjects + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TotalObjects") + } + x.TotalObjects = data + case 2: // EvacuatedObjects + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "EvacuatedObjects") + } + x.EvacuatedObjects = data + case 3: // FailedObjects + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "FailedObjects") + } + x.FailedObjects = data + case 4: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + case 5: // Status + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Status") + } + x.Status = GetShardEvacuationStatusResponse_Body_Status(data) + case 6: // Duration + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Duration") + } + x.Duration = new(GetShardEvacuationStatusResponse_Body_Duration) + if err := x.Duration.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 7: // StartedAt + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "StartedAt") + } + x.StartedAt = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp) + if err := x.StartedAt.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 8: // ErrorMessage + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ErrorMessage") + } + x.ErrorMessage = data + case 9: // SkippedObjects + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "SkippedObjects") + } + x.SkippedObjects = data + case 10: // TotalTrees + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TotalTrees") + } + x.TotalTrees = data + case 11: // EvacuatedTrees + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "EvacuatedTrees") + } + x.EvacuatedTrees = data + case 12: // FailedTrees + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "FailedTrees") + } + x.FailedTrees = data + } + } + return nil +} +func (x *GetShardEvacuationStatusResponse_Body) GetTotalObjects() uint64 { + if x != nil { + return x.TotalObjects + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetTotalObjects(v uint64) { + x.TotalObjects = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedObjects() uint64 { + if x != nil { + return x.EvacuatedObjects + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedObjects(v uint64) { + x.EvacuatedObjects = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetFailedObjects() uint64 { + if x != nil { + return x.FailedObjects + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetFailedObjects(v uint64) { + x.FailedObjects = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *GetShardEvacuationStatusResponse_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} +func (x 
*GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status { + if x != nil { + return x.Status + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetStatus(v GetShardEvacuationStatusResponse_Body_Status) { + x.Status = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration { + if x != nil { + return x.Duration + } + return nil +} +func (x *GetShardEvacuationStatusResponse_Body) SetDuration(v *GetShardEvacuationStatusResponse_Body_Duration) { + x.Duration = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp { + if x != nil { + return x.StartedAt + } + return nil +} +func (x *GetShardEvacuationStatusResponse_Body) SetStartedAt(v *GetShardEvacuationStatusResponse_Body_UnixTimestamp) { + x.StartedAt = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} +func (x *GetShardEvacuationStatusResponse_Body) SetErrorMessage(v string) { + x.ErrorMessage = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetSkippedObjects() uint64 { + if x != nil { + return x.SkippedObjects + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetSkippedObjects(v uint64) { + x.SkippedObjects = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetTotalTrees() uint64 { + if x != nil { + return x.TotalTrees + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetTotalTrees(v uint64) { + x.TotalTrees = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedTrees() uint64 { + if x != nil { + return x.EvacuatedTrees + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedTrees(v uint64) { + x.EvacuatedTrees = v +} +func (x *GetShardEvacuationStatusResponse_Body) GetFailedTrees() uint64 { + if x != nil { + return x.FailedTrees + } + return 0 +} +func (x *GetShardEvacuationStatusResponse_Body) SetFailedTrees(v uint64) { + x.FailedTrees = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *GetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"totalObjects\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalObjects, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"evacuatedObjects\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedObjects, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"failedObjects\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedObjects, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"status\":" + out.RawString(prefix) + v := int32(x.Status) + if vv, ok := GetShardEvacuationStatusResponse_Body_Status_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"duration\":" + out.RawString(prefix) + x.Duration.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"startedAt\":" + out.RawString(prefix) + x.StartedAt.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"errorMessage\":" + out.RawString(prefix) + out.String(x.ErrorMessage) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"skippedObjects\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.SkippedObjects, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"totalTrees\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalTrees, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"evacuatedTrees\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedTrees, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"failedTrees\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedTrees, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "totalObjects": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.TotalObjects = f + } + case "evacuatedObjects": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.EvacuatedObjects = f + } + case "failedObjects": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.FailedObjects = f + } + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + case "status": + { + var f GetShardEvacuationStatusResponse_Body_Status + var parsedValue GetShardEvacuationStatusResponse_Body_Status + switch v := in.Interface().(type) { + case string: + if vv, ok := GetShardEvacuationStatusResponse_Body_Status_value[v]; ok { + parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv) + case float64: + parsedValue = GetShardEvacuationStatusResponse_Body_Status(v) + } + f = parsedValue + x.Status = f + } + case "duration": + { + var f *GetShardEvacuationStatusResponse_Body_Duration + f = new(GetShardEvacuationStatusResponse_Body_Duration) + f.UnmarshalEasyJSON(in) + x.Duration = f + } + case "startedAt": + { + var f *GetShardEvacuationStatusResponse_Body_UnixTimestamp + f = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp) + f.UnmarshalEasyJSON(in) + x.StartedAt = f + } + case "errorMessage": + { + var f string + f = in.String() + x.ErrorMessage = f + } + case "skippedObjects": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.SkippedObjects = f + } + case "totalTrees": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.TotalTrees = f + } + case "evacuatedTrees": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.EvacuatedTrees = f + } + case "failedTrees": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.FailedTrees = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + 
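Reviewer note (not part of the diff): every `MarshalEasyJSON`/`UnmarshalEasyJSON` pair in this file hand-rolls the same jwriter/jlexer loop — write keys and values straight into the writer, then walk `{` key `:` value `,` tokens on the read side and dispatch on the key. The skeleton, reduced to a toy type; this sketch adds a `SkipRecursive` default for unknown keys (a defensive choice the generated code here does not make) and omits the null and top-level `Consumed` handling that the generated code includes:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
	"github.com/mailru/easyjson/jwriter"
)

// shardRef is a toy type standing in for the generated messages.
type shardRef struct {
	Name   string
	Active bool
}

func (s *shardRef) MarshalEasyJSON(out *jwriter.Writer) {
	out.RawByte('{')
	out.RawString("\"name\":")
	out.String(s.Name)
	out.RawByte(',')
	out.RawString("\"active\":")
	out.Bool(s.Active)
	out.RawByte('}')
}

func (s *shardRef) UnmarshalEasyJSON(in *jlexer.Lexer) {
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		switch key {
		case "name":
			s.Name = in.String()
		case "active":
			s.Active = in.Bool()
		default:
			in.SkipRecursive() // tolerate keys this build does not know
		}
		in.WantComma()
	}
	in.Delim('}')
}

func main() {
	w := jwriter.Writer{}
	(&shardRef{Name: "s01", Active: true}).MarshalEasyJSON(&w)
	data, err := w.BuildBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"name":"s01","active":true}

	var out shardRef
	l := jlexer.Lexer{Data: data}
	out.UnmarshalEasyJSON(&l)
	if err := l.Error(); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Name:s01 Active:true}
}
```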
+type GetShardEvacuationStatusResponse struct { + Body *GetShardEvacuationStatusResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse)(nil) + _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse)(nil) + _ json.Marshaler = (*GetShardEvacuationStatusResponse)(nil) + _ json.Unmarshaler = (*GetShardEvacuationStatusResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2038,27 +9104,6 @@ func (x *GetShardEvacuationStatusResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetShardEvacuationStatusResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2074,13 +9119,174 @@ func (x *GetShardEvacuationStatusResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetShardEvacuationStatusResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetShardEvacuationStatusResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetShardEvacuationStatusResponse) SetBody(v *GetShardEvacuationStatusResponse_Body) { + x.Body = v +} +func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetShardEvacuationStatusResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetShardEvacuationStatusResponse_Body + f = new(GetShardEvacuationStatusResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ResetShardEvacuationStatusRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) + _ json.Marshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) + _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
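Note (illustrative): this hunk shows the pattern the patch repeats for every message: StableMarshal is replaced by a pooled easyproto MarshalProtobuf/UnmarshalProtobuf pair, and ReadSignedData now signs the body's protobuf encoding. A sketch under the same assumed import path as above:

```go
package main

import (
	"bytes"
	"fmt"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed path
)

func main() {
	resp := new(control.GetShardEvacuationStatusResponse)
	resp.SetBody(new(control.GetShardEvacuationStatusResponse_Body))

	// easyproto appends to dst; passing nil allocates a fresh buffer.
	wire := resp.MarshalProtobuf(nil)

	var decoded control.GetShardEvacuationStatusResponse
	if err := decoded.UnmarshalProtobuf(wire); err != nil {
		panic(err)
	}

	// The signed payload is exactly the body's encoding, not the envelope's.
	signed, _ := resp.ReadSignedData(nil)
	fmt.Println(bytes.Equal(signed, resp.GetBody().MarshalProtobuf(nil))) // true
}
```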
@@ -2091,18 +9297,93 @@ func (x *ResetShardEvacuationStatusRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ResetShardEvacuationStatusRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ResetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ResetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ResetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ResetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ResetShardEvacuationStatusRequest struct { + Body *ResetShardEvacuationStatusRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest)(nil) + _ json.Marshaler = (*ResetShardEvacuationStatusRequest)(nil) + _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2115,27 +9396,6 @@ func (x *ResetShardEvacuationStatusRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. 
-// -// Structures with the same field values have the same binary format. -func (x *ResetShardEvacuationStatusRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2151,13 +9411,174 @@ func (x *ResetShardEvacuationStatusRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ResetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ResetShardEvacuationStatusRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ResetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ResetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ResetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ResetShardEvacuationStatusRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ResetShardEvacuationStatusRequest) GetBody() *ResetShardEvacuationStatusRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ResetShardEvacuationStatusRequest) SetBody(v *ResetShardEvacuationStatusRequest_Body) { + x.Body = v +} +func (x *ResetShardEvacuationStatusRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ResetShardEvacuationStatusRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *ResetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ResetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ResetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ResetShardEvacuationStatusRequest_Body + f = new(ResetShardEvacuationStatusRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ResetShardEvacuationStatusResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) + _ json.Marshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) + _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2168,18 +9589,93 @@ func (x *ResetShardEvacuationStatusResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ResetShardEvacuationStatusResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ResetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ResetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ResetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ResetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ResetShardEvacuationStatusResponse struct { + Body *ResetShardEvacuationStatusResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse)(nil) + _ json.Marshaler = (*ResetShardEvacuationStatusResponse)(nil) + _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2192,27 +9688,6 @@ func (x *ResetShardEvacuationStatusResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ResetShardEvacuationStatusResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2228,13 +9703,174 @@ func (x *ResetShardEvacuationStatusResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ResetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ResetShardEvacuationStatusResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *ResetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ResetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ResetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ResetShardEvacuationStatusResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ResetShardEvacuationStatusResponse) GetBody() *ResetShardEvacuationStatusResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ResetShardEvacuationStatusResponse) SetBody(v *ResetShardEvacuationStatusResponse_Body) { + x.Body = v +} +func (x *ResetShardEvacuationStatusResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ResetShardEvacuationStatusResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ResetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ResetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ResetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ResetShardEvacuationStatusResponse_Body + f = new(ResetShardEvacuationStatusResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StopShardEvacuationRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest_Body)(nil) + _ json.Marshaler = (*StopShardEvacuationRequest_Body)(nil) + _ json.Unmarshaler = (*StopShardEvacuationRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2245,18 +9881,93 @@ func (x *StopShardEvacuationRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *StopShardEvacuationRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StopShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StopShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StopShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StopShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StopShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *StopShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StopShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StopShardEvacuationRequest struct { + Body *StopShardEvacuationRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest)(nil) + _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest)(nil) + _ json.Marshaler = (*StopShardEvacuationRequest)(nil) + _ json.Unmarshaler = (*StopShardEvacuationRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2269,27 +9980,6 @@ func (x *StopShardEvacuationRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *StopShardEvacuationRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2305,13 +9995,174 @@ func (x *StopShardEvacuationRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *StopShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *StopShardEvacuationRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StopShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StopShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *StopShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(StopShardEvacuationRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *StopShardEvacuationRequest) SetBody(v *StopShardEvacuationRequest_Body) { + x.Body = v +} +func (x *StopShardEvacuationRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *StopShardEvacuationRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StopShardEvacuationRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *StopShardEvacuationRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StopShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *StopShardEvacuationRequest_Body + f = new(StopShardEvacuationRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StopShardEvacuationResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse_Body)(nil) + _ json.Marshaler = (*StopShardEvacuationResponse_Body)(nil) + _ json.Unmarshaler = (*StopShardEvacuationResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
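Note (illustrative): several of the messages in this file (StopShardEvacuationRequest_Body and the other empty bodies) have no fields, so their generated EmitProtobuf is a no-op. A quick sketch of what that means in practice, same assumed import:

```go
package main

import (
	"fmt"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed path
)

func main() {
	// Bodies with no fields emit nothing: zero-length protobuf, "{}" in JSON.
	body := new(control.StopShardEvacuationRequest_Body)
	fmt.Println(len(body.MarshalProtobuf(nil))) // 0

	js, err := body.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // {}
}
```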
@@ -2322,18 +10173,93 @@ func (x *StopShardEvacuationResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *StopShardEvacuationResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StopShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StopShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StopShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StopShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StopShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *StopShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StopShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StopShardEvacuationResponse struct { + Body *StopShardEvacuationResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse)(nil) + _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse)(nil) + _ json.Marshaler = (*StopShardEvacuationResponse)(nil) + _ json.Unmarshaler = (*StopShardEvacuationResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2346,27 +10272,6 @@ func (x *StopShardEvacuationResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *StopShardEvacuationResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2382,13 +10287,176 @@ func (x *StopShardEvacuationResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *StopShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *StopShardEvacuationResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StopShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *StopShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StopShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(StopShardEvacuationResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *StopShardEvacuationResponse) SetBody(v *StopShardEvacuationResponse_Body) { + x.Body = v +} +func (x *StopShardEvacuationResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *StopShardEvacuationResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *StopShardEvacuationResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *StopShardEvacuationResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StopShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *StopShardEvacuationResponse_Body + f = new(StopShardEvacuationResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddChainLocalOverrideRequest_Body struct { + Target *ChainTarget `json:"target"` + Chain []byte `json:"chain"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest_Body)(nil) + _ json.Marshaler = (*AddChainLocalOverrideRequest_Body)(nil) + _ json.Unmarshaler = (*AddChainLocalOverrideRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2401,27 +10469,179 @@ func (x *AddChainLocalOverrideRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddChainLocalOverrideRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target) - offset += proto.BytesMarshal(2, buf[offset:], x.Chain) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *AddChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Target != nil { + x.Target.EmitProtobuf(mm.AppendMessage(1)) + } + if len(x.Chain) != 0 { + mm.AppendBytes(2, x.Chain) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest_Body") + } + switch fc.FieldNum { + case 1: // Target + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Target") + } + x.Target = new(ChainTarget) + if err := x.Target.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Chain + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Chain") + } + x.Chain = data + } + } + return nil +} +func (x *AddChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { + if x != nil { + return x.Target + } + return nil +} +func (x *AddChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) { + x.Target = v +} +func (x *AddChainLocalOverrideRequest_Body) GetChain() []byte { + if x != nil { + return x.Chain + } + return nil +} +func (x *AddChainLocalOverrideRequest_Body) SetChain(v []byte) { + x.Chain = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) + x.Target.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chain\":" + out.RawString(prefix) + if x.Chain != nil { + out.Base64Bytes(x.Chain) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *AddChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "target": + { + var f *ChainTarget + f = new(ChainTarget) + f.UnmarshalEasyJSON(in) + x.Target = f + } + case "chain": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Chain = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddChainLocalOverrideRequest struct { + Body *AddChainLocalOverrideRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest)(nil) + _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest)(nil) + _ json.Marshaler = (*AddChainLocalOverrideRequest)(nil) + _ json.Unmarshaler = (*AddChainLocalOverrideRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2434,27 +10654,6 @@ func (x *AddChainLocalOverrideRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddChainLocalOverrideRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2470,13 +10669,175 @@ func (x *AddChainLocalOverrideRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *AddChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *AddChainLocalOverrideRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *AddChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(AddChainLocalOverrideRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *AddChainLocalOverrideRequest) GetBody() *AddChainLocalOverrideRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *AddChainLocalOverrideRequest) SetBody(v *AddChainLocalOverrideRequest_Body) { + x.Body = v +} +func (x *AddChainLocalOverrideRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *AddChainLocalOverrideRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddChainLocalOverrideRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *AddChainLocalOverrideRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *AddChainLocalOverrideRequest_Body + f = new(AddChainLocalOverrideRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddChainLocalOverrideResponse_Body struct { + ChainId []byte `json:"chainId"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse_Body)(nil) + _ json.Marshaler = (*AddChainLocalOverrideResponse_Body)(nil) + _ json.Unmarshaler = (*AddChainLocalOverrideResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
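Note (illustrative): putting the AddChainLocalOverride types together. Chain is emitted only when non-empty (the len(x.Chain) != 0 guard above), and the request's signed data is the body alone. The import path and byte values below are assumptions for the sketch:

```go
package main

import (
	"fmt"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed path
)

func main() {
	body := new(control.AddChainLocalOverrideRequest_Body)
	body.SetTarget(new(control.ChainTarget))
	body.SetChain([]byte{0x01, 0x02, 0x03}) // raw bytes; base64-encoded in the JSON form

	req := new(control.AddChainLocalOverrideRequest)
	req.SetBody(body)

	wire := req.MarshalProtobuf(nil)

	// The signature input is the body alone, per ReadSignedData.
	signed, _ := req.ReadSignedData(nil)
	fmt.Println(len(wire) > len(signed)) // true: the envelope adds the body's tag and length
}
```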
@@ -2488,26 +10849,141 @@ func (x *AddChainLocalOverrideResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddChainLocalOverrideResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ChainId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ChainId) != 0 { + mm.AppendBytes(1, x.ChainId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse_Body") + } + switch fc.FieldNum { + case 1: // ChainId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ChainId") + } + x.ChainId = data + } + } + return nil +} +func (x *AddChainLocalOverrideResponse_Body) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} +func (x *AddChainLocalOverrideResponse_Body) SetChainId(v []byte) { + x.ChainId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainId\":" + out.RawString(prefix) + if x.ChainId != nil { + out.Base64Bytes(x.ChainId) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *AddChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "chainId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ChainId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddChainLocalOverrideResponse struct { + Body *AddChainLocalOverrideResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse)(nil) + _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse)(nil) + _ json.Marshaler = (*AddChainLocalOverrideResponse)(nil) + _ json.Unmarshaler = (*AddChainLocalOverrideResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2520,27 +10996,6 @@ func (x *AddChainLocalOverrideResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddChainLocalOverrideResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2556,13 +11011,176 @@ func (x *AddChainLocalOverrideResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *AddChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *AddChainLocalOverrideResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *AddChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(AddChainLocalOverrideResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *AddChainLocalOverrideResponse) GetBody() *AddChainLocalOverrideResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *AddChainLocalOverrideResponse) SetBody(v *AddChainLocalOverrideResponse_Body) { + x.Body = v +} +func (x *AddChainLocalOverrideResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *AddChainLocalOverrideResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddChainLocalOverrideResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *AddChainLocalOverrideResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *AddChainLocalOverrideResponse_Body + f = new(AddChainLocalOverrideResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetChainLocalOverrideRequest_Body struct { + Target *ChainTarget `json:"target"` + ChainId []byte `json:"chainId"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest_Body)(nil) + _ json.Marshaler = (*GetChainLocalOverrideRequest_Body)(nil) + _ json.Unmarshaler = (*GetChainLocalOverrideRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
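Note (illustrative): on the JSON side, []byte fields such as chainId travel base64-encoded (out.Base64Bytes on encode, in.Bytes on decode), and null-valued keys are skipped by the generated decoder. A sketch with the same assumed import:

```go
package main

import (
	"fmt"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed path
)

func main() {
	// "AQID" is base64 for 0x01 0x02 0x03; the null target is skipped.
	raw := []byte(`{"target": null, "chainId": "AQID"}`)

	var body control.GetChainLocalOverrideRequest_Body
	if err := body.UnmarshalJSON(raw); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", body.GetChainId()) // 01 02 03
}
```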
@@ -2575,27 +11193,179 @@ func (x *GetChainLocalOverrideRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetChainLocalOverrideRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target) - offset += proto.BytesMarshal(2, buf[offset:], x.ChainId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Target != nil { + x.Target.EmitProtobuf(mm.AppendMessage(1)) + } + if len(x.ChainId) != 0 { + mm.AppendBytes(2, x.ChainId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest_Body") + } + switch fc.FieldNum { + case 1: // Target + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Target") + } + x.Target = new(ChainTarget) + if err := x.Target.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // ChainId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ChainId") + } + x.ChainId = data + } + } + return nil +} +func (x *GetChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { + if x != nil { + return x.Target + } + return nil +} +func (x *GetChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) { + x.Target = v +} +func (x *GetChainLocalOverrideRequest_Body) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} +func (x *GetChainLocalOverrideRequest_Body) SetChainId(v []byte) { + x.ChainId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) + x.Target.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainId\":" + out.RawString(prefix) + if x.ChainId != nil { + out.Base64Bytes(x.ChainId) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "target": + { + var f *ChainTarget + f = new(ChainTarget) + f.UnmarshalEasyJSON(in) + x.Target = f + } + case "chainId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ChainId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetChainLocalOverrideRequest struct { + Body *GetChainLocalOverrideRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest)(nil) + _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest)(nil) + _ json.Marshaler = (*GetChainLocalOverrideRequest)(nil) + _ json.Unmarshaler = (*GetChainLocalOverrideRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2608,27 +11378,6 @@ func (x *GetChainLocalOverrideRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetChainLocalOverrideRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2644,13 +11393,175 @@ func (x *GetChainLocalOverrideRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetChainLocalOverrideRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetChainLocalOverrideRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetChainLocalOverrideRequest) GetBody() *GetChainLocalOverrideRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetChainLocalOverrideRequest) SetBody(v *GetChainLocalOverrideRequest_Body) { + x.Body = v +} +func (x *GetChainLocalOverrideRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetChainLocalOverrideRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetChainLocalOverrideRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetChainLocalOverrideRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetChainLocalOverrideRequest_Body + f = new(GetChainLocalOverrideRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetChainLocalOverrideResponse_Body struct { + Chain []byte `json:"chain"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse_Body)(nil) + _ json.Marshaler = (*GetChainLocalOverrideResponse_Body)(nil) + _ json.Unmarshaler = (*GetChainLocalOverrideResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
@@ -2662,26 +11573,141 @@ func (x *GetChainLocalOverrideResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetChainLocalOverrideResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.Chain) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Chain) != 0 { + mm.AppendBytes(1, x.Chain) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse_Body") + } + switch fc.FieldNum { + case 1: // Chain + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Chain") + } + x.Chain = data + } + } + return nil +} +func (x *GetChainLocalOverrideResponse_Body) GetChain() []byte { + if x != nil { + return x.Chain + } + return nil +} +func (x *GetChainLocalOverrideResponse_Body) SetChain(v []byte) { + x.Chain = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chain\":" + out.RawString(prefix) + if x.Chain != nil { + out.Base64Bytes(x.Chain) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "chain": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Chain = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetChainLocalOverrideResponse struct { + Body *GetChainLocalOverrideResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse)(nil) + _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse)(nil) + _ json.Marshaler = (*GetChainLocalOverrideResponse)(nil) + _ json.Unmarshaler = (*GetChainLocalOverrideResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2694,27 +11720,6 @@ func (x *GetChainLocalOverrideResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetChainLocalOverrideResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2730,13 +11735,175 @@ func (x *GetChainLocalOverrideResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetChainLocalOverrideResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetChainLocalOverrideResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetChainLocalOverrideResponse) GetBody() *GetChainLocalOverrideResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetChainLocalOverrideResponse) SetBody(v *GetChainLocalOverrideResponse_Body) { + x.Body = v +} +func (x *GetChainLocalOverrideResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetChainLocalOverrideResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetChainLocalOverrideResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetChainLocalOverrideResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetChainLocalOverrideResponse_Body + f = new(GetChainLocalOverrideResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListChainLocalOverridesRequest_Body struct { + Target *ChainTarget `json:"target"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest_Body)(nil) + _ json.Marshaler = (*ListChainLocalOverridesRequest_Body)(nil) + _ json.Unmarshaler = (*ListChainLocalOverridesRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
@@ -2748,26 +11915,135 @@ func (x *ListChainLocalOverridesRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListChainLocalOverridesRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListChainLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListChainLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Target != nil { + x.Target.EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListChainLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest_Body") + } + switch fc.FieldNum { + case 1: // Target + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Target") + } + x.Target = new(ChainTarget) + if err := x.Target.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListChainLocalOverridesRequest_Body) GetTarget() *ChainTarget { + if x != nil { + return x.Target + } + return nil +} +func (x *ListChainLocalOverridesRequest_Body) SetTarget(v *ChainTarget) { + x.Target = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListChainLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) + x.Target.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListChainLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListChainLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "target": + { + var f *ChainTarget + f = new(ChainTarget) + f.UnmarshalEasyJSON(in) + x.Target = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListChainLocalOverridesRequest struct { + Body *ListChainLocalOverridesRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest)(nil) + _ json.Marshaler = (*ListChainLocalOverridesRequest)(nil) + _ json.Unmarshaler = (*ListChainLocalOverridesRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2780,27 +12056,6 @@ func (x *ListChainLocalOverridesRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListChainLocalOverridesRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2816,13 +12071,175 @@ func (x *ListChainLocalOverridesRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ListChainLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ListChainLocalOverridesRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListChainLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListChainLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListChainLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListChainLocalOverridesRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListChainLocalOverridesRequest) GetBody() *ListChainLocalOverridesRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListChainLocalOverridesRequest) SetBody(v *ListChainLocalOverridesRequest_Body) { + x.Body = v +} +func (x *ListChainLocalOverridesRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListChainLocalOverridesRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListChainLocalOverridesRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListChainLocalOverridesRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListChainLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListChainLocalOverridesRequest_Body + f = new(ListChainLocalOverridesRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListChainLocalOverridesResponse_Body struct { + Chains [][]byte `json:"chains"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse_Body)(nil) + _ json.Marshaler = (*ListChainLocalOverridesResponse_Body)(nil) + _ json.Unmarshaler = (*ListChainLocalOverridesResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
@@ -2834,26 +12251,155 @@ func (x *ListChainLocalOverridesResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListChainLocalOverridesResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Chains) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListChainLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListChainLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Chains { + mm.AppendBytes(1, x.Chains[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListChainLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse_Body") + } + switch fc.FieldNum { + case 1: // Chains + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Chains") + } + x.Chains = append(x.Chains, data) + } + } + return nil +} +func (x *ListChainLocalOverridesResponse_Body) GetChains() [][]byte { + if x != nil { + return x.Chains + } + return nil +} +func (x *ListChainLocalOverridesResponse_Body) SetChains(v [][]byte) { + x.Chains = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListChainLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chains\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Chains { + if i != 0 { + out.RawByte(',') + } + if x.Chains[i] != nil { + out.Base64Bytes(x.Chains[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListChainLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "chains": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Chains = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListChainLocalOverridesResponse struct { + Body *ListChainLocalOverridesResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse)(nil) + _ json.Marshaler = (*ListChainLocalOverridesResponse)(nil) + _ json.Unmarshaler = (*ListChainLocalOverridesResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2866,27 +12412,6 @@ func (x *ListChainLocalOverridesResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListChainLocalOverridesResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2902,13 +12427,175 @@ func (x *ListChainLocalOverridesResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ListChainLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ListChainLocalOverridesResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListChainLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListChainLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListChainLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListChainLocalOverridesResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListChainLocalOverridesResponse) GetBody() *ListChainLocalOverridesResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListChainLocalOverridesResponse) SetBody(v *ListChainLocalOverridesResponse_Body) { + x.Body = v +} +func (x *ListChainLocalOverridesResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListChainLocalOverridesResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListChainLocalOverridesResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListChainLocalOverridesResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListChainLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListChainLocalOverridesResponse_Body + f = new(ListChainLocalOverridesResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListTargetsLocalOverridesRequest_Body struct { + ChainName string `json:"chainName"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) + _ json.Marshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) + _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. 
// // Structures with the same field values have the same binary size. @@ -2920,26 +12607,131 @@ func (x *ListTargetsLocalOverridesRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListTargetsLocalOverridesRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.StringMarshal(1, buf[offset:], x.ChainName) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListTargetsLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListTargetsLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ChainName) != 0 { + mm.AppendString(1, x.ChainName) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest_Body") + } + switch fc.FieldNum { + case 1: // ChainName + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ChainName") + } + x.ChainName = data + } + } + return nil +} +func (x *ListTargetsLocalOverridesRequest_Body) GetChainName() string { + if x != nil { + return x.ChainName + } + return "" +} +func (x *ListTargetsLocalOverridesRequest_Body) SetChainName(v string) { + x.ChainName = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListTargetsLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainName\":" + out.RawString(prefix) + out.String(x.ChainName) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "chainName": + { + var f string + f = in.String() + x.ChainName = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListTargetsLocalOverridesRequest struct { + Body *ListTargetsLocalOverridesRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest)(nil) + _ json.Marshaler = (*ListTargetsLocalOverridesRequest)(nil) + _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -2952,27 +12744,6 @@ func (x *ListTargetsLocalOverridesRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListTargetsLocalOverridesRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -2988,13 +12759,175 @@ func (x *ListTargetsLocalOverridesRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ListTargetsLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ListTargetsLocalOverridesRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListTargetsLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListTargetsLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListTargetsLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListTargetsLocalOverridesRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListTargetsLocalOverridesRequest) GetBody() *ListTargetsLocalOverridesRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListTargetsLocalOverridesRequest) SetBody(v *ListTargetsLocalOverridesRequest_Body) { + x.Body = v +} +func (x *ListTargetsLocalOverridesRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListTargetsLocalOverridesRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListTargetsLocalOverridesRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListTargetsLocalOverridesRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListTargetsLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListTargetsLocalOverridesRequest_Body + f = new(ListTargetsLocalOverridesRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListTargetsLocalOverridesResponse_Body struct { + Targets []ChainTarget `json:"targets"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) + _ json.Marshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) + _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. 
// // Structures with the same field values have the same binary size. @@ -3003,33 +12936,155 @@ func (x *ListTargetsLocalOverridesResponse_Body) StableSize() (size int) { return 0 } for i := range x.Targets { - size += proto.NestedStructureSize(1, x.Targets[i]) + size += proto.NestedStructureSizeUnchecked(1, &x.Targets[i]) } return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListTargetsLocalOverridesResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - for i := range x.Targets { - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Targets[i]) - } - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListTargetsLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for i := range x.Targets { + x.Targets[i].EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse_Body") + } + switch fc.FieldNum { + case 1: // Targets + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Targets") + } + x.Targets = append(x.Targets, ChainTarget{}) + ff := &x.Targets[len(x.Targets)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []ChainTarget { + if x != nil { + return x.Targets + } + return nil +} +func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []ChainTarget) { + x.Targets = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListTargetsLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"targets\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Targets { + if i != 0 { + out.RawByte(',') + } + x.Targets[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "targets": + { + var f ChainTarget + var list []ChainTarget + in.Delim('[') + for !in.IsDelim(']') { + f = ChainTarget{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Targets = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListTargetsLocalOverridesResponse struct { + Body *ListTargetsLocalOverridesResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse)(nil) + _ json.Marshaler = (*ListTargetsLocalOverridesResponse)(nil) + _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3042,27 +13097,6 @@ func (x *ListTargetsLocalOverridesResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ListTargetsLocalOverridesResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3078,13 +13112,176 @@ func (x *ListTargetsLocalOverridesResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ListTargetsLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ListTargetsLocalOverridesResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListTargetsLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ListTargetsLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListTargetsLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListTargetsLocalOverridesResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListTargetsLocalOverridesResponse) GetBody() *ListTargetsLocalOverridesResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListTargetsLocalOverridesResponse) SetBody(v *ListTargetsLocalOverridesResponse_Body) { + x.Body = v +} +func (x *ListTargetsLocalOverridesResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListTargetsLocalOverridesResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListTargetsLocalOverridesResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListTargetsLocalOverridesResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListTargetsLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListTargetsLocalOverridesResponse_Body + f = new(ListTargetsLocalOverridesResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverrideRequest_Body struct { + Target *ChainTarget `json:"target"` + ChainId []byte `json:"chainId"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) + _ json.Marshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. 
// // Structures with the same field values have the same binary size. @@ -3097,27 +13294,179 @@ func (x *RemoveChainLocalOverrideRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverrideRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target) - offset += proto.BytesMarshal(2, buf[offset:], x.ChainId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Target != nil { + x.Target.EmitProtobuf(mm.AppendMessage(1)) + } + if len(x.ChainId) != 0 { + mm.AppendBytes(2, x.ChainId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest_Body") + } + switch fc.FieldNum { + case 1: // Target + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Target") + } + x.Target = new(ChainTarget) + if err := x.Target.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // ChainId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ChainId") + } + x.ChainId = data + } + } + return nil +} +func (x *RemoveChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { + if x != nil { + return x.Target + } + return nil +} +func (x *RemoveChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) { + x.Target = v +} +func (x *RemoveChainLocalOverrideRequest_Body) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} +func (x *RemoveChainLocalOverrideRequest_Body) SetChainId(v []byte) { + x.ChainId = v +} + +// MarshalJSON implements the json.Marshaler interface. 
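+// It serializes the body with easyjson (no reflection) and reports any
+// writer error alongside the produced bytes.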
+func (x *RemoveChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) + x.Target.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainId\":" + out.RawString(prefix) + if x.ChainId != nil { + out.Base64Bytes(x.ChainId) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "target": + { + var f *ChainTarget + f = new(ChainTarget) + f.UnmarshalEasyJSON(in) + x.Target = f + } + case "chainId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ChainId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverrideRequest struct { + Body *RemoveChainLocalOverrideRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest)(nil) + _ json.Marshaler = (*RemoveChainLocalOverrideRequest)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3130,27 +13479,6 @@ func (x *RemoveChainLocalOverrideRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverrideRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3166,13 +13494,174 @@ func (x *RemoveChainLocalOverrideRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. 
func (x *RemoveChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveChainLocalOverrideRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveChainLocalOverrideRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveChainLocalOverrideRequest) GetBody() *RemoveChainLocalOverrideRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveChainLocalOverrideRequest) SetBody(v *RemoveChainLocalOverrideRequest_Body) { + x.Body = v +} +func (x *RemoveChainLocalOverrideRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveChainLocalOverrideRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveChainLocalOverrideRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
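+// It delegates to UnmarshalEasyJSON and returns the first lexer error
+// encountered, if any.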
+func (x *RemoveChainLocalOverrideRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveChainLocalOverrideRequest_Body + f = new(RemoveChainLocalOverrideRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverrideResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) + _ json.Marshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3183,18 +13672,93 @@ func (x *RemoveChainLocalOverrideResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverrideResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverrideResponse struct { + Body *RemoveChainLocalOverrideResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse)(nil) + _ json.Marshaler = (*RemoveChainLocalOverrideResponse)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3207,27 +13771,6 @@ func (x *RemoveChainLocalOverrideResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverrideResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3243,13 +13786,175 @@ func (x *RemoveChainLocalOverrideResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveChainLocalOverrideResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
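+// It lazily allocates the body and signature: each is created only when its
+// field number (1 or 2) appears in src.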
+func (x *RemoveChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveChainLocalOverrideResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveChainLocalOverrideResponse) GetBody() *RemoveChainLocalOverrideResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveChainLocalOverrideResponse) SetBody(v *RemoveChainLocalOverrideResponse_Body) { + x.Body = v +} +func (x *RemoveChainLocalOverrideResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveChainLocalOverrideResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveChainLocalOverrideResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveChainLocalOverrideResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveChainLocalOverrideResponse_Body + f = new(RemoveChainLocalOverrideResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverridesByTargetRequest_Body struct { + Target *ChainTarget `json:"target"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) + _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. 
// // Structures with the same field values have the same binary size. @@ -3261,26 +13966,135 @@ func (x *RemoveChainLocalOverridesByTargetRequest_Body) StableSize() (size int) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverridesByTargetRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverridesByTargetRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Target != nil { + x.Target.EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest_Body") + } + switch fc.FieldNum { + case 1: // Target + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Target") + } + x.Target = new(ChainTarget) + if err := x.Target.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetRequest_Body) GetTarget() *ChainTarget { + if x != nil { + return x.Target + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetRequest_Body) SetTarget(v *ChainTarget) { + x.Target = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) + x.Target.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
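+// A JSON null for "target" (or for the whole object) leaves the field unset
+// rather than allocating an empty ChainTarget.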
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "target": + { + var f *ChainTarget + f = new(ChainTarget) + f.UnmarshalEasyJSON(in) + x.Target = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverridesByTargetRequest struct { + Body *RemoveChainLocalOverridesByTargetRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil) + _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3293,27 +14107,6 @@ func (x *RemoveChainLocalOverridesByTargetRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverridesByTargetRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3329,13 +14122,174 @@ func (x *RemoveChainLocalOverridesByTargetRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveChainLocalOverridesByTargetRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveChainLocalOverridesByTargetRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverridesByTargetRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverridesByTargetRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveChainLocalOverridesByTargetRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetRequest) GetBody() *RemoveChainLocalOverridesByTargetRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetRequest) SetBody(v *RemoveChainLocalOverridesByTargetRequest_Body) { + x.Body = v +} +func (x *RemoveChainLocalOverridesByTargetRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveChainLocalOverridesByTargetRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
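+// A top-level null input is consumed without modifying x.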
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveChainLocalOverridesByTargetRequest_Body + f = new(RemoveChainLocalOverridesByTargetRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverridesByTargetResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) + _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3346,18 +14300,93 @@ func (x *RemoveChainLocalOverridesByTargetResponse_Body) StableSize() (size int) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverridesByTargetResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverridesByTargetResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveChainLocalOverridesByTargetResponse struct { + Body *RemoveChainLocalOverridesByTargetResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil) + _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil) + _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3370,27 +14399,6 @@ func (x *RemoveChainLocalOverridesByTargetResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveChainLocalOverridesByTargetResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3406,13 +14414,179 @@ func (x *RemoveChainLocalOverridesByTargetResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveChainLocalOverridesByTargetResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveChainLocalOverridesByTargetResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveChainLocalOverridesByTargetResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveChainLocalOverridesByTargetResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
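+// Decoding stops at the first malformed field and reports which message
+// could not be read.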
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveChainLocalOverridesByTargetResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetResponse) GetBody() *RemoveChainLocalOverridesByTargetResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetResponse) SetBody(v *RemoveChainLocalOverridesByTargetResponse_Body) { + x.Body = v +} +func (x *RemoveChainLocalOverridesByTargetResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveChainLocalOverridesByTargetResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveChainLocalOverridesByTargetResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveChainLocalOverridesByTargetResponse_Body + f = new(RemoveChainLocalOverridesByTargetResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SealWriteCacheRequest_Body struct { + Shard_ID [][]byte `json:"shardID"` + IgnoreErrors bool `json:"ignoreErrors"` + Async bool `json:"async"` + RestoreMode bool `json:"restoreMode"` + Shrink bool `json:"shrink"` +} + +var ( + _ encoding.ProtoMarshaler = (*SealWriteCacheRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest_Body)(nil) + _ json.Marshaler = (*SealWriteCacheRequest_Body)(nil) + _ json.Unmarshaler = (*SealWriteCacheRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3422,30 +14596,297 @@ func (x *SealWriteCacheRequest_Body) StableSize() (size int) { } size += proto.RepeatedBytesSize(1, x.Shard_ID) size += proto.BoolSize(2, x.IgnoreErrors) + size += proto.BoolSize(3, x.Async) + size += proto.BoolSize(4, x.RestoreMode) + size += proto.BoolSize(5, x.Shrink) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SealWriteCacheRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID) - offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SealWriteCacheRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SealWriteCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } + if x.IgnoreErrors { + mm.AppendBool(2, x.IgnoreErrors) + } + if x.Async { + mm.AppendBool(3, x.Async) + } + if x.RestoreMode { + mm.AppendBool(4, x.RestoreMode) + } + if x.Shrink { + mm.AppendBool(5, x.Shrink) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *SealWriteCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + case 2: // IgnoreErrors + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors") + } + x.IgnoreErrors = data + case 3: // Async + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Async") + } + x.Async = data + case 4: // RestoreMode + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "RestoreMode") + } + x.RestoreMode = data + case 5: // Shrink + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shrink") + } + x.Shrink = data + } + } + return nil +} +func (x *SealWriteCacheRequest_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *SealWriteCacheRequest_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} +func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool { + if x != nil { + return x.IgnoreErrors + } + return false +} +func (x *SealWriteCacheRequest_Body) SetIgnoreErrors(v bool) { + x.IgnoreErrors = v +} +func (x *SealWriteCacheRequest_Body) GetAsync() bool { + if x != nil { + return x.Async + } + return false +} +func (x *SealWriteCacheRequest_Body) SetAsync(v bool) { + x.Async = v +} +func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool { + if x != nil { + return x.RestoreMode + } + return false +} +func (x *SealWriteCacheRequest_Body) SetRestoreMode(v bool) { + x.RestoreMode = v +} +func (x *SealWriteCacheRequest_Body) GetShrink() bool { + if x != nil { + return x.Shrink + } + return false +} +func (x *SealWriteCacheRequest_Body) SetShrink(v bool) { + x.Shrink = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SealWriteCacheRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ignoreErrors\":" + out.RawString(prefix) + out.Bool(x.IgnoreErrors) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"async\":" + out.RawString(prefix) + out.Bool(x.Async) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"restoreMode\":" + out.RawString(prefix) + out.Bool(x.RestoreMode) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shrink\":" + out.RawString(prefix) + out.Bool(x.Shrink) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *SealWriteCacheRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + case "ignoreErrors": + { + var f bool + f = in.Bool() + x.IgnoreErrors = f + } + case "async": + { + var f bool + f = in.Bool() + x.Async = f + } + case "restoreMode": + { + var f bool + f = in.Bool() + x.RestoreMode = f + } + case "shrink": + { + var f bool + f = in.Bool() + x.Shrink = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SealWriteCacheRequest struct { + Body *SealWriteCacheRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SealWriteCacheRequest)(nil) + _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest)(nil) + _ json.Marshaler = (*SealWriteCacheRequest)(nil) + _ json.Unmarshaler = (*SealWriteCacheRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3458,27 +14899,6 @@ func (x *SealWriteCacheRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SealWriteCacheRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3494,13 +14914,177 @@ func (x *SealWriteCacheRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SealWriteCacheRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SealWriteCacheRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
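+// It borrows a marshaler from pool.MarshalerPool and returns it when done,
+// so repeated calls avoid per-call marshaler allocations.
+//
+// A minimal round-trip sketch (illustrative values, not part of the API):
+//
+//	req := new(SealWriteCacheRequest)
+//	req.SetBody(&SealWriteCacheRequest_Body{Async: true})
+//	data := req.MarshalProtobuf(nil)
+//	var out SealWriteCacheRequest
+//	_ = out.UnmarshalProtobuf(data)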
+func (x *SealWriteCacheRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SealWriteCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SealWriteCacheRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SealWriteCacheRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SealWriteCacheRequest) GetBody() *SealWriteCacheRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SealWriteCacheRequest) SetBody(v *SealWriteCacheRequest_Body) { + x.Body = v +} +func (x *SealWriteCacheRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SealWriteCacheRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SealWriteCacheRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *SealWriteCacheRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SealWriteCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SealWriteCacheRequest_Body + f = new(SealWriteCacheRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SealWriteCacheResponse_Body_Status struct { + Shard_ID []byte `json:"shardID"` + Success bool `json:"success"` + Error string `json:"error"` +} + +var ( + _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body_Status)(nil) + _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body_Status)(nil) + _ json.Marshaler = (*SealWriteCacheResponse_Body_Status)(nil) + _ json.Unmarshaler = (*SealWriteCacheResponse_Body_Status)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3514,28 +15098,208 @@ func (x *SealWriteCacheResponse_Body_Status) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SealWriteCacheResponse_Body_Status) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID) - offset += proto.BoolMarshal(2, buf[offset:], x.Success) - offset += proto.StringMarshal(3, buf[offset:], x.Error) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SealWriteCacheResponse_Body_Status) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SealWriteCacheResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Shard_ID) != 0 { + mm.AppendBytes(1, x.Shard_ID) + } + if x.Success { + mm.AppendBool(2, x.Success) + } + if len(x.Error) != 0 { + mm.AppendString(3, x.Error) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body_Status") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = data + case 2: // Success + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Success") + } + x.Success = data + case 3: // Error + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Error") + } + x.Error = data + } + } + return nil +} +func (x *SealWriteCacheResponse_Body_Status) GetShard_ID() []byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *SealWriteCacheResponse_Body_Status) SetShard_ID(v []byte) { + x.Shard_ID = v +} +func (x *SealWriteCacheResponse_Body_Status) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} +func (x *SealWriteCacheResponse_Body_Status) SetSuccess(v bool) { + x.Success = v +} +func (x *SealWriteCacheResponse_Body_Status) GetError() string { + if x != nil { + return x.Error + } + return "" +} +func (x *SealWriteCacheResponse_Body_Status) SetError(v string) { + x.Error = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SealWriteCacheResponse_Body_Status) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + if x.Shard_ID != nil { + out.Base64Bytes(x.Shard_ID) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"success\":" + out.RawString(prefix) + out.Bool(x.Success) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"error\":" + out.RawString(prefix) + out.String(x.Error) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
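+// The shardID value is read as base64 (an empty string yields a nil slice);
+// success and error are decoded as ordinary JSON scalars.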
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Shard_ID = f + } + case "success": + { + var f bool + f = in.Bool() + x.Success = f + } + case "error": + { + var f string + f = in.String() + x.Error = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SealWriteCacheResponse_Body struct { + Results []SealWriteCacheResponse_Body_Status `json:"results"` +} + +var ( + _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body)(nil) + _ json.Marshaler = (*SealWriteCacheResponse_Body)(nil) + _ json.Unmarshaler = (*SealWriteCacheResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3544,33 +15308,155 @@ func (x *SealWriteCacheResponse_Body) StableSize() (size int) { return 0 } for i := range x.Results { - size += proto.NestedStructureSize(1, x.Results[i]) + size += proto.NestedStructureSizeUnchecked(1, &x.Results[i]) } return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *SealWriteCacheResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - for i := range x.Results { - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Results[i]) - } - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SealWriteCacheResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for i := range x.Results { + x.Results[i].EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body") + } + switch fc.FieldNum { + case 1: // Results + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Results") + } + x.Results = append(x.Results, SealWriteCacheResponse_Body_Status{}) + ff := &x.Results[len(x.Results)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SealWriteCacheResponse_Body) GetResults() []SealWriteCacheResponse_Body_Status { + if x != nil { + return x.Results + } + return nil +} +func (x *SealWriteCacheResponse_Body) SetResults(v []SealWriteCacheResponse_Body_Status) { + x.Results = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *SealWriteCacheResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"results\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Results { + if i != 0 { + out.RawByte(',') + } + x.Results[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SealWriteCacheResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SealWriteCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "results": + { + var f SealWriteCacheResponse_Body_Status + var list []SealWriteCacheResponse_Body_Status + in.Delim('[') + for !in.IsDelim(']') { + f = SealWriteCacheResponse_Body_Status{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Results = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type SealWriteCacheResponse struct { + Body *SealWriteCacheResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*SealWriteCacheResponse)(nil) + _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse)(nil) + _ json.Marshaler = (*SealWriteCacheResponse)(nil) + _ json.Unmarshaler = (*SealWriteCacheResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3583,27 +15469,6 @@ func (x *SealWriteCacheResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *SealWriteCacheResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3619,13 +15484,175 @@ func (x *SealWriteCacheResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *SealWriteCacheResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *SealWriteCacheResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *SealWriteCacheResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *SealWriteCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *SealWriteCacheResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(SealWriteCacheResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *SealWriteCacheResponse) GetBody() *SealWriteCacheResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *SealWriteCacheResponse) SetBody(v *SealWriteCacheResponse_Body) { + x.Body = v +} +func (x *SealWriteCacheResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *SealWriteCacheResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
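+//
+// Editor's note: because json.Marshaler and json.Unmarshaler are
+// implemented, encoding/json delegates to the easyjson fast path, so a
+// plain json.Marshal call works (a sketch; the exact output shape follows
+// from MarshalEasyJSON below):
+//
+//	resp := &SealWriteCacheResponse{Body: new(SealWriteCacheResponse_Body)}
+//	js, err := json.Marshal(resp)
+//	// js is roughly {"body":{"results":[]},"signature":null}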
+func (x *SealWriteCacheResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *SealWriteCacheResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *SealWriteCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *SealWriteCacheResponse_Body + f = new(SealWriteCacheResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DetachShardsRequest_Body struct { + Shard_ID [][]byte `json:"shardID"` +} + +var ( + _ encoding.ProtoMarshaler = (*DetachShardsRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*DetachShardsRequest_Body)(nil) + _ json.Marshaler = (*DetachShardsRequest_Body)(nil) + _ json.Unmarshaler = (*DetachShardsRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3637,26 +15664,155 @@ func (x *DetachShardsRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DetachShardsRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DetachShardsRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DetachShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *DetachShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + } + } + return nil +} +func (x *DetachShardsRequest_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *DetachShardsRequest_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DetachShardsRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *DetachShardsRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DetachShardsRequest struct { + Body *DetachShardsRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*DetachShardsRequest)(nil) + _ encoding.ProtoUnmarshaler = (*DetachShardsRequest)(nil) + _ json.Marshaler = (*DetachShardsRequest)(nil) + _ json.Unmarshaler = (*DetachShardsRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3669,27 +15825,6 @@ func (x *DetachShardsRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *DetachShardsRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3705,13 +15840,174 @@ func (x *DetachShardsRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *DetachShardsRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *DetachShardsRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DetachShardsRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DetachShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *DetachShardsRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(DetachShardsRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *DetachShardsRequest) GetBody() *DetachShardsRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *DetachShardsRequest) SetBody(v *DetachShardsRequest_Body) { + x.Body = v +} +func (x *DetachShardsRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *DetachShardsRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DetachShardsRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
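+//
+// Editor's sketch (placeholder input): the decoder accepts the same shape
+// MarshalEasyJSON produces; byte fields travel as base64 strings:
+//
+//	js := []byte(`{"body":{"shardID":["AQI="]},"signature":null}`)
+//	var req DetachShardsRequest
+//	if err := req.UnmarshalJSON(js); err != nil {
+//		// malformed JSON is reported by the lexer
+//	}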
+func (x *DetachShardsRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DetachShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *DetachShardsRequest_Body + f = new(DetachShardsRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DetachShardsResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*DetachShardsResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*DetachShardsResponse_Body)(nil) + _ json.Marshaler = (*DetachShardsResponse_Body)(nil) + _ json.Unmarshaler = (*DetachShardsResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3722,18 +16018,93 @@ func (x *DetachShardsResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DetachShardsResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DetachShardsResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *DetachShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *DetachShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DetachShardsResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DetachShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
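+//
+// Editor's note: DetachShardsResponse_Body has no fields, so EmitProtobuf
+// writes nothing, StableSize reports 0, and the JSON form is just {}; the
+// wrapper message still signs these empty body bytes as usual.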
+func (x *DetachShardsResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DetachShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type DetachShardsResponse struct { + Body *DetachShardsResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*DetachShardsResponse)(nil) + _ encoding.ProtoUnmarshaler = (*DetachShardsResponse)(nil) + _ json.Marshaler = (*DetachShardsResponse)(nil) + _ json.Unmarshaler = (*DetachShardsResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -3746,27 +16117,6 @@ func (x *DetachShardsResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *DetachShardsResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -3782,9 +16132,1898 @@ func (x *DetachShardsResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *DetachShardsResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *DetachShardsResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *DetachShardsResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *DetachShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *DetachShardsResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(DetachShardsResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *DetachShardsResponse) GetBody() *DetachShardsResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *DetachShardsResponse) SetBody(v *DetachShardsResponse_Body) { + x.Body = v +} +func (x *DetachShardsResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *DetachShardsResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *DetachShardsResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *DetachShardsResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *DetachShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *DetachShardsResponse_Body + f = new(DetachShardsResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardRebuildRequest_Body struct { + Shard_ID [][]byte `json:"shardID"` + TargetFillPercent uint32 `json:"targetFillPercent"` + ConcurrencyLimit uint32 `json:"concurrencyLimit"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardRebuildRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest_Body)(nil) + _ json.Marshaler = (*StartShardRebuildRequest_Body)(nil) + _ json.Unmarshaler = (*StartShardRebuildRequest_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. 
+func (x *StartShardRebuildRequest_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.RepeatedBytesSize(1, x.Shard_ID) + size += proto.UInt32Size(2, x.TargetFillPercent) + size += proto.UInt32Size(3, x.ConcurrencyLimit) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardRebuildRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *StartShardRebuildRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } + if x.TargetFillPercent != 0 { + mm.AppendUint32(2, x.TargetFillPercent) + } + if x.ConcurrencyLimit != 0 { + mm.AppendUint32(3, x.ConcurrencyLimit) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StartShardRebuildRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + case 2: // TargetFillPercent + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TargetFillPercent") + } + x.TargetFillPercent = data + case 3: // ConcurrencyLimit + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ConcurrencyLimit") + } + x.ConcurrencyLimit = data + } + } + return nil +} +func (x *StartShardRebuildRequest_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *StartShardRebuildRequest_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} +func (x *StartShardRebuildRequest_Body) GetTargetFillPercent() uint32 { + if x != nil { + return x.TargetFillPercent + } + return 0 +} +func (x *StartShardRebuildRequest_Body) SetTargetFillPercent(v uint32) { + x.TargetFillPercent = v +} +func (x *StartShardRebuildRequest_Body) GetConcurrencyLimit() uint32 { + if x != nil { + return x.ConcurrencyLimit + } + return 0 +} +func (x *StartShardRebuildRequest_Body) SetConcurrencyLimit(v uint32) { + x.ConcurrencyLimit = v +} + +// MarshalJSON implements the json.Marshaler interface. 
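+//
+// Editor's sketch (shardID is a stand-in value): EmitProtobuf above skips
+// zero values, so only non-default fields reach the wire:
+//
+//	body := new(StartShardRebuildRequest_Body)
+//	body.SetShard_ID([][]byte{shardID})
+//	body.SetTargetFillPercent(80) // target shard fill, percent
+//	body.SetConcurrencyLimit(4)
+//	buf := body.MarshalProtobuf(nil) // fields 1-3 emitted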
+func (x *StartShardRebuildRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"targetFillPercent\":" + out.RawString(prefix) + out.Uint32(x.TargetFillPercent) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"concurrencyLimit\":" + out.RawString(prefix) + out.Uint32(x.ConcurrencyLimit) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *StartShardRebuildRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + case "targetFillPercent": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.TargetFillPercent = f + } + case "concurrencyLimit": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.ConcurrencyLimit = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardRebuildRequest struct { + Body *StartShardRebuildRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardRebuildRequest)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest)(nil) + _ json.Marshaler = (*StartShardRebuildRequest)(nil) + _ json.Unmarshaler = (*StartShardRebuildRequest)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *StartShardRebuildRequest) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. +// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. 
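+//
+// Editor's sketch of the call sequence (the signing step itself happens
+// outside this file; sig is a stand-in *Signature):
+//
+//	req := new(StartShardRebuildRequest)
+//	req.SetBody(body)
+//	data, err := req.ReadSignedData(make([]byte, 0, req.SignedDataSize()))
+//	// sign data with the node key, then:
+//	req.SetSignature(sig)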
+func (x *StartShardRebuildRequest) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *StartShardRebuildRequest) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardRebuildRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *StartShardRebuildRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StartShardRebuildRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(StartShardRebuildRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *StartShardRebuildRequest) GetBody() *StartShardRebuildRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *StartShardRebuildRequest) SetBody(v *StartShardRebuildRequest_Body) { + x.Body = v +} +func (x *StartShardRebuildRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *StartShardRebuildRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StartShardRebuildRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
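+//
+// Editor's note: the numeric body fields are parsed with
+// strconv.ParseUint(n, 10, 32), so JSON numbers that are negative,
+// fractional or above math.MaxUint32 make UnmarshalJSON return an error
+// instead of silently truncating.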
+func (x *StartShardRebuildRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardRebuildRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *StartShardRebuildRequest_Body + f = new(StartShardRebuildRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardRebuildResponse_Body_Status struct { + Shard_ID []byte `json:"shardID"` + Success bool `json:"success"` + Error string `json:"error"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body_Status)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body_Status)(nil) + _ json.Marshaler = (*StartShardRebuildResponse_Body_Status)(nil) + _ json.Unmarshaler = (*StartShardRebuildResponse_Body_Status)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *StartShardRebuildResponse_Body_Status) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.BytesSize(1, x.Shard_ID) + size += proto.BoolSize(2, x.Success) + size += proto.StringSize(3, x.Error) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardRebuildResponse_Body_Status) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *StartShardRebuildResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Shard_ID) != 0 { + mm.AppendBytes(1, x.Shard_ID) + } + if x.Success { + mm.AppendBool(2, x.Success) + } + if len(x.Error) != 0 { + mm.AppendString(3, x.Error) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body_Status") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = data + case 2: // Success + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Success") + } + x.Success = data + case 3: // Error + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Error") + } + x.Error = data + } + } + return nil +} +func (x *StartShardRebuildResponse_Body_Status) GetShard_ID() []byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *StartShardRebuildResponse_Body_Status) SetShard_ID(v []byte) { + x.Shard_ID = v +} +func (x *StartShardRebuildResponse_Body_Status) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} +func (x *StartShardRebuildResponse_Body_Status) SetSuccess(v bool) { + x.Success = v +} +func (x *StartShardRebuildResponse_Body_Status) GetError() string { + if x != nil { + return x.Error + } + return "" +} +func (x *StartShardRebuildResponse_Body_Status) SetError(v string) { + x.Error = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StartShardRebuildResponse_Body_Status) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + if x.Shard_ID != nil { + out.Base64Bytes(x.Shard_ID) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"success\":" + out.RawString(prefix) + out.Bool(x.Success) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"error\":" + out.RawString(prefix) + out.String(x.Error) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Shard_ID = f + } + case "success": + { + var f bool + f = in.Bool() + x.Success = f + } + case "error": + { + var f string + f = in.String() + x.Error = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardRebuildResponse_Body struct { + Results []StartShardRebuildResponse_Body_Status `json:"results"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body)(nil) + _ json.Marshaler = (*StartShardRebuildResponse_Body)(nil) + _ json.Unmarshaler = (*StartShardRebuildResponse_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *StartShardRebuildResponse_Body) StableSize() (size int) { + if x == nil { + return 0 + } + for i := range x.Results { + size += proto.NestedStructureSizeUnchecked(1, &x.Results[i]) + } + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *StartShardRebuildResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *StartShardRebuildResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for i := range x.Results { + x.Results[i].EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StartShardRebuildResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body") + } + switch fc.FieldNum { + case 1: // Results + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Results") + } + x.Results = append(x.Results, StartShardRebuildResponse_Body_Status{}) + ff := &x.Results[len(x.Results)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *StartShardRebuildResponse_Body) GetResults() []StartShardRebuildResponse_Body_Status { + if x != nil { + return x.Results + } + return nil +} +func (x *StartShardRebuildResponse_Body) SetResults(v []StartShardRebuildResponse_Body_Status) { + x.Results = v +} + +// MarshalJSON implements the json.Marshaler interface. 
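+//
+// Editor's sketch (body is a stand-in variable): the nil-safe getters
+// make iterating per-shard outcomes straightforward:
+//
+//	for _, st := range body.GetResults() {
+//		if !st.GetSuccess() {
+//			fmt.Printf("shard %x: %s\n", st.GetShard_ID(), st.GetError())
+//		}
+//	}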
+func (x *StartShardRebuildResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"results\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Results { + if i != 0 { + out.RawByte(',') + } + x.Results[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *StartShardRebuildResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardRebuildResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "results": + { + var f StartShardRebuildResponse_Body_Status + var list []StartShardRebuildResponse_Body_Status + in.Delim('[') + for !in.IsDelim(']') { + f = StartShardRebuildResponse_Body_Status{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Results = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type StartShardRebuildResponse struct { + Body *StartShardRebuildResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*StartShardRebuildResponse)(nil) + _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse)(nil) + _ json.Marshaler = (*StartShardRebuildResponse)(nil) + _ json.Unmarshaler = (*StartShardRebuildResponse)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *StartShardRebuildResponse) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. +// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *StartShardRebuildResponse) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *StartShardRebuildResponse) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *StartShardRebuildResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *StartShardRebuildResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *StartShardRebuildResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(StartShardRebuildResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *StartShardRebuildResponse) GetBody() *StartShardRebuildResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *StartShardRebuildResponse) SetBody(v *StartShardRebuildResponse_Body) { + x.Body = v +} +func (x *StartShardRebuildResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *StartShardRebuildResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *StartShardRebuildResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *StartShardRebuildResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *StartShardRebuildResponse_Body + f = new(StartShardRebuildResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectRequest_Body struct { + ObjectId string `json:"objectId"` + ContainerId string `json:"containerId"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil) + _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil) + _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectRequest_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.StringSize(1, x.ObjectId) + size += proto.StringSize(2, x.ContainerId) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ObjectId) != 0 { + mm.AppendString(1, x.ObjectId) + } + if len(x.ContainerId) != 0 { + mm.AppendString(2, x.ContainerId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body") + } + switch fc.FieldNum { + case 1: // ObjectId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ObjectId") + } + x.ObjectId = data + case 2: // ContainerId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + } + } + return nil +} +func (x *ListShardsForObjectRequest_Body) GetObjectId() string { + if x != nil { + return x.ObjectId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) { + x.ObjectId = v +} +func (x *ListShardsForObjectRequest_Body) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) { + x.ContainerId = v +} + +// MarshalJSON implements the json.Marshaler interface. 
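+//
+// Editor's sketch: unlike the shard-scoped requests above, this body
+// addresses a single object by string identifiers (oidStr and cidStr are
+// stand-ins for the text forms of the object and container IDs):
+//
+//	body := new(ListShardsForObjectRequest_Body)
+//	body.SetObjectId(oidStr)
+//	body.SetContainerId(cidStr)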
+func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"objectId\":" + out.RawString(prefix) + out.String(x.ObjectId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + out.String(x.ContainerId) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "objectId": + { + var f string + f = in.String() + x.ObjectId = f + } + case "containerId": + { + var f string + f = in.String() + x.ContainerId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectRequest struct { + Body *ListShardsForObjectRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil) + _ json.Marshaler = (*ListShardsForObjectRequest)(nil) + _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectRequest) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. +// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *ListShardsForObjectRequest) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) { + x.Body = v +} +func (x *ListShardsForObjectRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectRequest_Body + f = new(ListShardsForObjectRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectResponse_Body struct { + Shard_ID [][]byte `json:"shardID"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectResponse_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.RepeatedBytesSize(1, x.Shard_ID) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + } + } + return nil +} +func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
+	w := jwriter.Writer{}
+	x.MarshalEasyJSON(&w)
+	return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+	if x == nil {
+		out.RawString("null")
+		return
+	}
+	first := true
+	out.RawByte('{')
+	{
+		if !first {
+			out.RawByte(',')
+		} else {
+			first = false
+		}
+		const prefix string = "\"shardID\":"
+		out.RawString(prefix)
+		out.RawByte('[')
+		for i := range x.Shard_ID {
+			if i != 0 {
+				out.RawByte(',')
+			}
+			if x.Shard_ID[i] != nil {
+				out.Base64Bytes(x.Shard_ID[i])
+			} else {
+				out.String("")
+			}
+		}
+		out.RawByte(']')
+	}
+	out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
+	r := jlexer.Lexer{Data: data}
+	x.UnmarshalEasyJSON(&r)
+	return r.Error()
+}
+func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+	isTopLevel := in.IsStart()
+	if in.IsNull() {
+		if isTopLevel {
+			in.Consumed()
+		}
+		in.Skip()
+		return
+	}
+	in.Delim('{')
+	for !in.IsDelim('}') {
+		key := in.UnsafeFieldName(false)
+		in.WantColon()
+		if in.IsNull() {
+			in.Skip()
+			in.WantComma()
+			continue
+		}
+		switch key {
+		case "shardID":
+			{
+				var f []byte
+				var list [][]byte
+				in.Delim('[')
+				for !in.IsDelim(']') {
+					{
+						tmp := in.Bytes()
+						if len(tmp) == 0 {
+							tmp = nil
+						}
+						f = tmp
+					}
+					list = append(list, f)
+					in.WantComma()
+				}
+				x.Shard_ID = list
+				in.Delim(']')
+			}
+		}
+		in.WantComma()
+	}
+	in.Delim('}')
+	if isTopLevel {
+		in.Consumed()
+	}
+}
+
+type ListShardsForObjectResponse struct {
+	Body      *ListShardsForObjectResponse_Body `json:"body"`
+	Signature *Signature                        `json:"signature"`
+}
+
+var (
+	_ encoding.ProtoMarshaler   = (*ListShardsForObjectResponse)(nil)
+	_ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
+	_ json.Marshaler            = (*ListShardsForObjectResponse)(nil)
+	_ json.Unmarshaler          = (*ListShardsForObjectResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectResponse) StableSize() (size int) {
+	if x == nil {
+		return 0
+	}
+	size += proto.NestedStructureSize(1, x.Body)
+	size += proto.NestedStructureSize(2, x.Signature)
+	return size
+}
+
+// SignedDataSize returns size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListShardsForObjectResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) { + x.Body = v +} +func (x *ListShardsForObjectResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectResponse_Body + f = new(ListShardsForObjectResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } } diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go index fa9de974a..045662ccf 100644 --- a/pkg/services/control/service_grpc.pb.go +++ b/pkg/services/control/service_grpc.pb.go @@ -26,7 +26,6 @@ const ( ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards" ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode" ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree" - ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard" ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation" ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus" ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus" @@ -41,6 +40,8 @@ const ( ControlService_ListTargetsLocalOverrides_FullMethodName = "/control.ControlService/ListTargetsLocalOverrides" ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache" ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards" + ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild" + ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject" ) // ControlServiceClient is the client API for ControlService service. @@ -61,10 +62,6 @@ type ControlServiceClient interface { SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) // Synchronizes all log operations for the specified tree. SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) // StartShardEvacuation starts moving all data from one shard to the others. StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) // GetShardEvacuationStatus returns evacuation status. @@ -97,6 +94,10 @@ type ControlServiceClient interface { SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error) // DetachShards detaches and closes shards. 
DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) + // StartShardRebuild starts shard rebuild process. + StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. + ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) } type controlServiceClient struct { @@ -170,15 +171,6 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron return out, nil } -func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) { - out := new(EvacuateShardResponse) - err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) { out := new(StartShardEvacuationResponse) err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...) @@ -305,6 +297,24 @@ func (c *controlServiceClient) DetachShards(ctx context.Context, in *DetachShard return out, nil } +func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) { + out := new(StartShardRebuildResponse) + err := c.cc.Invoke(ctx, ControlService_StartShardRebuild_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) { + out := new(ListShardsForObjectResponse) + err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ControlServiceServer is the server API for ControlService service. // All implementations should embed UnimplementedControlServiceServer // for forward compatibility @@ -323,10 +333,6 @@ type ControlServiceServer interface { SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error) // Synchronizes all log operations for the specified tree. SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) // StartShardEvacuation starts moving all data from one shard to the others. StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) // GetShardEvacuationStatus returns evacuation status. @@ -359,6 +365,10 @@ type ControlServiceServer interface { SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error) // DetachShards detaches and closes shards. DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) + // StartShardRebuild starts shard rebuild process. 
+ StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. + ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) } // UnimplementedControlServiceServer should be embedded to have forward compatible implementations. @@ -386,9 +396,6 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented") } -func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented") -} func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented") } @@ -431,6 +438,12 @@ func (UnimplementedControlServiceServer) SealWriteCache(context.Context, *SealWr func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DetachShards not implemented") } +func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented") +} +func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented") +} // UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to ControlServiceServer will @@ -569,24 +582,6 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } -func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EvacuateShardRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).EvacuateShard(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_EvacuateShard_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartShardEvacuationRequest) if err := dec(in); err != nil { @@ -839,6 +834,42 @@ func _ControlService_DetachShards_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartShardRebuildRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServiceServer).StartShardRebuild(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControlService_StartShardRebuild_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServiceServer).StartShardRebuild(ctx, req.(*StartShardRebuildRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListShardsForObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServiceServer).ListShardsForObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControlService_ListShardsForObject_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + // ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -874,10 +905,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{ MethodName: "SynchronizeTree", Handler: _ControlService_SynchronizeTree_Handler, }, - { - MethodName: "EvacuateShard", - Handler: _ControlService_EvacuateShard_Handler, - }, { MethodName: "StartShardEvacuation", Handler: _ControlService_StartShardEvacuation_Handler, @@ -934,6 +961,14 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{ MethodName: "DetachShards", Handler: _ControlService_DetachShards_Handler, }, + { + MethodName: "StartShardRebuild", + Handler: _ControlService_StartShardRebuild_Handler, + }, + { + MethodName: "ListShardsForObject", + Handler: _ControlService_ListShardsForObject_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkg/services/control/service.proto", diff --git a/pkg/services/control/service_test.go b/pkg/services/control/service_test.go deleted file mode 100644 index 1d98cc6f1..000000000 --- a/pkg/services/control/service_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package control_test - -import ( - "bytes" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" -) - -func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) { - testStableMarshal(t, - generateHealthCheckResponseBody(), - new(control.HealthCheckResponse_Body), - func(m1, m2 protoMessage) bool { - return equalHealthCheckResponseBodies( - m1.(*control.HealthCheckResponse_Body), - m2.(*control.HealthCheckResponse_Body), - ) - }, - ) -} - -func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body { - body := new(control.HealthCheckResponse_Body) - body.SetNetmapStatus(control.NetmapStatus_ONLINE) - body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN) - - return body -} - -func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool { - return b1.GetNetmapStatus() == b2.GetNetmapStatus() && - b1.GetHealthStatus() == b2.GetHealthStatus() -} - -func TestSetNetmapStatusRequest_Body_StableMarshal(t *testing.T) { - testStableMarshal(t, - generateSetNetmapStatusRequestBody(), - new(control.SetNetmapStatusRequest_Body), - func(m1, m2 protoMessage) bool { - return equalSetnetmapStatusRequestBodies( - m1.(*control.SetNetmapStatusRequest_Body), - m2.(*control.SetNetmapStatusRequest_Body), - ) - }, - ) -} - -func generateSetNetmapStatusRequestBody() *control.SetNetmapStatusRequest_Body { - body := new(control.SetNetmapStatusRequest_Body) - body.SetStatus(control.NetmapStatus_ONLINE) - - return body -} - -func equalSetnetmapStatusRequestBodies(b1, b2 *control.SetNetmapStatusRequest_Body) bool { - return b1.GetStatus() == b2.GetStatus() -} - -func TestListShardsResponse_Body_StableMarshal(t *testing.T) { - testStableMarshal(t, - generateListShardsResponseBody(), - new(control.ListShardsResponse_Body), - func(m1, m2 protoMessage) bool { - return equalListShardResponseBodies( - m1.(*control.ListShardsResponse_Body), - m2.(*control.ListShardsResponse_Body), - ) - }, - ) -} - -func equalListShardResponseBodies(b1, b2 *control.ListShardsResponse_Body) bool { - if len(b1.Shards) != len(b2.Shards) { - return false - } - - for i := range b1.Shards { - if b1.Shards[i].GetMetabasePath() != b2.Shards[i].GetMetabasePath() || - b1.Shards[i].GetWritecachePath() != b2.Shards[i].GetWritecachePath() || - b1.Shards[i].GetPiloramaPath() != b2.Shards[i].GetPiloramaPath() || - !bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[i].GetShard_ID()) { - return false - } 
- - info1 := b1.Shards[i].GetBlobstor() - info2 := b2.Shards[i].GetBlobstor() - if !compareBlobstorInfo(info1, info2) { - return false - } - } - - for i := range b1.Shards { - for j := i + 1; j < len(b1.Shards); j++ { - if b1.Shards[i].GetMetabasePath() == b2.Shards[j].GetMetabasePath() || - !compareBlobstorInfo(b1.Shards[i].Blobstor, b2.Shards[i].Blobstor) || - b1.Shards[i].GetWritecachePath() == b2.Shards[j].GetWritecachePath() || - bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[j].GetShard_ID()) { - return false - } - } - } - - return true -} - -func compareBlobstorInfo(a, b []*control.BlobstorInfo) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i].Type != b[i].Type || - a[i].Path != b[i].Path { - return false - } - } - return true -} - -func generateListShardsResponseBody() *control.ListShardsResponse_Body { - body := new(control.ListShardsResponse_Body) - body.SetShards([]*control.ShardInfo{ - generateShardInfo(0), - generateShardInfo(1), - }) - - return body -} - -func TestSetShardModeRequest_Body_StableMarshal(t *testing.T) { - testStableMarshal(t, - generateSetShardModeRequestBody(), - new(control.SetShardModeRequest_Body), - func(m1, m2 protoMessage) bool { - return equalSetShardModeRequestBodies( - m1.(*control.SetShardModeRequest_Body), - m2.(*control.SetShardModeRequest_Body), - ) - }, - ) -} - -func generateSetShardModeRequestBody() *control.SetShardModeRequest_Body { - body := new(control.SetShardModeRequest_Body) - body.SetShardIDList([][]byte{{0, 1, 2, 3, 4}}) - body.SetMode(control.ShardMode_READ_WRITE) - - return body -} - -func equalSetShardModeRequestBodies(b1, b2 *control.SetShardModeRequest_Body) bool { - if b1.GetMode() != b2.GetMode() || len(b1.Shard_ID) != len(b2.Shard_ID) { - return false - } - - for i := range b1.Shard_ID { - if !bytes.Equal(b1.Shard_ID[i], b2.Shard_ID[i]) { - return false - } - } - - return true -} - -func TestSynchronizeTreeRequest_Body_StableMarshal(t *testing.T) { - testStableMarshal(t, - &control.SynchronizeTreeRequest_Body{ - ContainerId: []byte{1, 2, 3, 4, 5, 6, 7}, - TreeId: "someID", - Height: 42, - }, - new(control.SynchronizeTreeRequest_Body), - func(m1, m2 protoMessage) bool { - b1 := m1.(*control.SynchronizeTreeRequest_Body) - b2 := m2.(*control.SynchronizeTreeRequest_Body) - return bytes.Equal(b1.GetContainerId(), b2.GetContainerId()) && - b1.GetTreeId() == b2.GetTreeId() && - b1.GetHeight() == b2.GetHeight() - }, - ) -} diff --git a/pkg/services/control/types.go b/pkg/services/control/types.go deleted file mode 100644 index 94f681c55..000000000 --- a/pkg/services/control/types.go +++ /dev/null @@ -1,118 +0,0 @@ -package control - -import ( - "google.golang.org/protobuf/encoding/protojson" -) - -// SetKey sets public key used for signing. -func (x *Signature) SetKey(v []byte) { - if x != nil { - x.Key = v - } -} - -// SetSign sets binary signature. -func (x *Signature) SetSign(v []byte) { - if x != nil { - x.Sign = v - } -} - -// SetKey sets key of the node attribute. -func (x *NodeInfo_Attribute) SetKey(v string) { - if x != nil { - x.Key = v - } -} - -// SetValue sets value of the node attribute. -func (x *NodeInfo_Attribute) SetValue(v string) { - if x != nil { - x.Value = v - } -} - -// SetParents sets parent keys. -func (x *NodeInfo_Attribute) SetParents(v []string) { - if x != nil { - x.Parents = v - } -} - -// SetPublicKey sets public key of the FrostFS node in a binary format. 
-func (x *NodeInfo) SetPublicKey(v []byte) { - if x != nil { - x.PublicKey = v - } -} - -// SetAddresses sets ways to connect to a node. -func (x *NodeInfo) SetAddresses(v []string) { - if x != nil { - x.Addresses = v - } -} - -// SetAttributes sets attributes of the FrostFS Storage Node. -func (x *NodeInfo) SetAttributes(v []*NodeInfo_Attribute) { - if x != nil { - x.Attributes = v - } -} - -// SetState sets state of the FrostFS node. -func (x *NodeInfo) SetState(v NetmapStatus) { - if x != nil { - x.State = v - } -} - -// SetEpoch sets revision number of the network map. -func (x *Netmap) SetEpoch(v uint64) { - if x != nil { - x.Epoch = v - } -} - -// SetNodes sets nodes presented in network. -func (x *Netmap) SetNodes(v []*NodeInfo) { - if x != nil { - x.Nodes = v - } -} - -func (x *Netmap) MarshalJSON() ([]byte, error) { - return protojson.MarshalOptions{ - EmitUnpopulated: true, - }.Marshal(x) -} - -// SetID sets identificator of the shard. -func (x *ShardInfo) SetID(v []byte) { - x.Shard_ID = v -} - -// SetMetabasePath sets path to shard's metabase. -func (x *ShardInfo) SetMetabasePath(v string) { - x.MetabasePath = v -} - -// SetWriteCachePath sets path to shard's write-cache. -func (x *ShardInfo) SetWriteCachePath(v string) { - x.WritecachePath = v -} - -// SetPiloramaPath sets path to shard's pilorama. -func (x *ShardInfo) SetPiloramaPath(v string) { - x.PiloramaPath = v -} - -// SetMode sets path to shard's work mode. -func (x *ShardInfo) SetMode(v ShardMode) { - x.Mode = v -} - -// SetErrorCount sets shard's error counter. -func (x *ShardInfo) SetErrorCount(count uint32) { - x.ErrorCount = count -} diff --git a/pkg/services/control/types.pb.go b/pkg/services/control/types.pb.go deleted file mode 100644 index 858755694..000000000 --- a/pkg/services/control/types.pb.go +++ /dev/null @@ -1,1011 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.0 -// source: pkg/services/control/types.proto - -package control - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Status of the storage node in the FrostFS network map. -type NetmapStatus int32 - -const ( - // Undefined status, default value. - NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0 - // Node is online. - NetmapStatus_ONLINE NetmapStatus = 1 - // Node is offline. - NetmapStatus_OFFLINE NetmapStatus = 2 - // Node is maintained by the owner. - NetmapStatus_MAINTENANCE NetmapStatus = 3 -) - -// Enum value maps for NetmapStatus. 
-var ( - NetmapStatus_name = map[int32]string{ - 0: "STATUS_UNDEFINED", - 1: "ONLINE", - 2: "OFFLINE", - 3: "MAINTENANCE", - } - NetmapStatus_value = map[string]int32{ - "STATUS_UNDEFINED": 0, - "ONLINE": 1, - "OFFLINE": 2, - "MAINTENANCE": 3, - } -) - -func (x NetmapStatus) Enum() *NetmapStatus { - p := new(NetmapStatus) - *p = x - return p -} - -func (x NetmapStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NetmapStatus) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_control_types_proto_enumTypes[0].Descriptor() -} - -func (NetmapStatus) Type() protoreflect.EnumType { - return &file_pkg_services_control_types_proto_enumTypes[0] -} - -func (x NetmapStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NetmapStatus.Descriptor instead. -func (NetmapStatus) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0} -} - -// Health status of the storage node application. -type HealthStatus int32 - -const ( - // Undefined status, default value. - HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0 - // Storage node application is starting. - HealthStatus_STARTING HealthStatus = 1 - // Storage node application is started and serves all services. - HealthStatus_READY HealthStatus = 2 - // Storage node application is shutting down. - HealthStatus_SHUTTING_DOWN HealthStatus = 3 - // Storage node application is reconfiguring. - HealthStatus_RECONFIGURING HealthStatus = 4 -) - -// Enum value maps for HealthStatus. -var ( - HealthStatus_name = map[int32]string{ - 0: "HEALTH_STATUS_UNDEFINED", - 1: "STARTING", - 2: "READY", - 3: "SHUTTING_DOWN", - 4: "RECONFIGURING", - } - HealthStatus_value = map[string]int32{ - "HEALTH_STATUS_UNDEFINED": 0, - "STARTING": 1, - "READY": 2, - "SHUTTING_DOWN": 3, - "RECONFIGURING": 4, - } -) - -func (x HealthStatus) Enum() *HealthStatus { - p := new(HealthStatus) - *p = x - return p -} - -func (x HealthStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HealthStatus) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_control_types_proto_enumTypes[1].Descriptor() -} - -func (HealthStatus) Type() protoreflect.EnumType { - return &file_pkg_services_control_types_proto_enumTypes[1] -} - -func (x HealthStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HealthStatus.Descriptor instead. -func (HealthStatus) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1} -} - -// Work mode of the shard. -type ShardMode int32 - -const ( - // Undefined mode, default value. - ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0 - // Read-write. - ShardMode_READ_WRITE ShardMode = 1 - // Read-only. - ShardMode_READ_ONLY ShardMode = 2 - // Degraded. - ShardMode_DEGRADED ShardMode = 3 - // DegradedReadOnly. - ShardMode_DEGRADED_READ_ONLY ShardMode = 4 -) - -// Enum value maps for ShardMode. 
-var ( - ShardMode_name = map[int32]string{ - 0: "SHARD_MODE_UNDEFINED", - 1: "READ_WRITE", - 2: "READ_ONLY", - 3: "DEGRADED", - 4: "DEGRADED_READ_ONLY", - } - ShardMode_value = map[string]int32{ - "SHARD_MODE_UNDEFINED": 0, - "READ_WRITE": 1, - "READ_ONLY": 2, - "DEGRADED": 3, - "DEGRADED_READ_ONLY": 4, - } -) - -func (x ShardMode) Enum() *ShardMode { - p := new(ShardMode) - *p = x - return p -} - -func (x ShardMode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ShardMode) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_control_types_proto_enumTypes[2].Descriptor() -} - -func (ShardMode) Type() protoreflect.EnumType { - return &file_pkg_services_control_types_proto_enumTypes[2] -} - -func (x ShardMode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ShardMode.Descriptor instead. -func (ShardMode) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2} -} - -type ChainTarget_TargetType int32 - -const ( - ChainTarget_UNDEFINED ChainTarget_TargetType = 0 - ChainTarget_NAMESPACE ChainTarget_TargetType = 1 - ChainTarget_CONTAINER ChainTarget_TargetType = 2 - ChainTarget_USER ChainTarget_TargetType = 3 - ChainTarget_GROUP ChainTarget_TargetType = 4 -) - -// Enum value maps for ChainTarget_TargetType. -var ( - ChainTarget_TargetType_name = map[int32]string{ - 0: "UNDEFINED", - 1: "NAMESPACE", - 2: "CONTAINER", - 3: "USER", - 4: "GROUP", - } - ChainTarget_TargetType_value = map[string]int32{ - "UNDEFINED": 0, - "NAMESPACE": 1, - "CONTAINER": 2, - "USER": 3, - "GROUP": 4, - } -) - -func (x ChainTarget_TargetType) Enum() *ChainTarget_TargetType { - p := new(ChainTarget_TargetType) - *p = x - return p -} - -func (x ChainTarget_TargetType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ChainTarget_TargetType) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_control_types_proto_enumTypes[3].Descriptor() -} - -func (ChainTarget_TargetType) Type() protoreflect.EnumType { - return &file_pkg_services_control_types_proto_enumTypes[3] -} - -func (x ChainTarget_TargetType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ChainTarget_TargetType.Descriptor instead. -func (ChainTarget_TargetType) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{5, 0} -} - -// Signature of some message. -type Signature struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Public key used for signing. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Binary signature. 
- Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"` -} - -func (x *Signature) Reset() { - *x = Signature{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_types_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Signature) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Signature) ProtoMessage() {} - -func (x *Signature) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_types_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Signature.ProtoReflect.Descriptor instead. -func (*Signature) Descriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0} -} - -func (x *Signature) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *Signature) GetSign() []byte { - if x != nil { - return x.Sign - } - return nil -} - -// FrostFS node description. -type NodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Public key of the FrostFS node in a binary format. - PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` - // Ways to connect to a node. - Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"` - // Carries list of the FrostFS node attributes in a key-value form. Key name - // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo - // structures with duplicated attribute names or attributes with empty values - // will be considered invalid. - Attributes []*NodeInfo_Attribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - // Carries state of the FrostFS node. - State NetmapStatus `protobuf:"varint,4,opt,name=state,proto3,enum=control.NetmapStatus" json:"state,omitempty"` -} - -func (x *NodeInfo) Reset() { - *x = NodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_types_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeInfo) ProtoMessage() {} - -func (x *NodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_types_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead. -func (*NodeInfo) Descriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1} -} - -func (x *NodeInfo) GetPublicKey() []byte { - if x != nil { - return x.PublicKey - } - return nil -} - -func (x *NodeInfo) GetAddresses() []string { - if x != nil { - return x.Addresses - } - return nil -} - -func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute { - if x != nil { - return x.Attributes - } - return nil -} - -func (x *NodeInfo) GetState() NetmapStatus { - if x != nil { - return x.State - } - return NetmapStatus_STATUS_UNDEFINED -} - -// Network map structure. 
-type Netmap struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Network map revision number. - Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"` - // Nodes presented in network. - Nodes []*NodeInfo `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty"` -} - -func (x *Netmap) Reset() { - *x = Netmap{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_types_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Netmap) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Netmap) ProtoMessage() {} - -func (x *Netmap) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_types_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Netmap.ProtoReflect.Descriptor instead. -func (*Netmap) Descriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2} -} - -func (x *Netmap) GetEpoch() uint64 { - if x != nil { - return x.Epoch - } - return 0 -} - -func (x *Netmap) GetNodes() []*NodeInfo { - if x != nil { - return x.Nodes - } - return nil -} - -// Shard description. -type ShardInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID of the shard. - Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"` - // Path to shard's metabase. - MetabasePath string `protobuf:"bytes,2,opt,name=metabase_path,json=metabasePath,proto3" json:"metabase_path,omitempty"` - // Shard's blobstor info. - Blobstor []*BlobstorInfo `protobuf:"bytes,3,rep,name=blobstor,proto3" json:"blobstor,omitempty"` - // Path to shard's write-cache, empty if disabled. - WritecachePath string `protobuf:"bytes,4,opt,name=writecache_path,json=writecachePath,proto3" json:"writecache_path,omitempty"` - // Work mode of the shard. - Mode ShardMode `protobuf:"varint,5,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"` - // Amount of errors occured. - ErrorCount uint32 `protobuf:"varint,6,opt,name=errorCount,proto3" json:"errorCount,omitempty"` - // Path to shard's pilorama storage. - PiloramaPath string `protobuf:"bytes,7,opt,name=pilorama_path,json=piloramaPath,proto3" json:"pilorama_path,omitempty"` -} - -func (x *ShardInfo) Reset() { - *x = ShardInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_types_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShardInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShardInfo) ProtoMessage() {} - -func (x *ShardInfo) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_types_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShardInfo.ProtoReflect.Descriptor instead. 
-func (*ShardInfo) Descriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{3} -} - -func (x *ShardInfo) GetShard_ID() []byte { - if x != nil { - return x.Shard_ID - } - return nil -} - -func (x *ShardInfo) GetMetabasePath() string { - if x != nil { - return x.MetabasePath - } - return "" -} - -func (x *ShardInfo) GetBlobstor() []*BlobstorInfo { - if x != nil { - return x.Blobstor - } - return nil -} - -func (x *ShardInfo) GetWritecachePath() string { - if x != nil { - return x.WritecachePath - } - return "" -} - -func (x *ShardInfo) GetMode() ShardMode { - if x != nil { - return x.Mode - } - return ShardMode_SHARD_MODE_UNDEFINED -} - -func (x *ShardInfo) GetErrorCount() uint32 { - if x != nil { - return x.ErrorCount - } - return 0 -} - -func (x *ShardInfo) GetPiloramaPath() string { - if x != nil { - return x.PiloramaPath - } - return "" -} - -// Blobstor component description. -type BlobstorInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Path to the root. - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - // Component type. - Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` -} - -func (x *BlobstorInfo) Reset() { - *x = BlobstorInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_types_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BlobstorInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BlobstorInfo) ProtoMessage() {} - -func (x *BlobstorInfo) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_types_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BlobstorInfo.ProtoReflect.Descriptor instead. -func (*BlobstorInfo) Descriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{4} -} - -func (x *BlobstorInfo) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *BlobstorInfo) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -// ChainTarget is an object to which local overrides -// are applied. -type ChainTarget struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type ChainTarget_TargetType `protobuf:"varint,1,opt,name=type,proto3,enum=control.ChainTarget_TargetType" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` -} - -func (x *ChainTarget) Reset() { - *x = ChainTarget{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_types_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ChainTarget) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ChainTarget) ProtoMessage() {} - -func (x *ChainTarget) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_types_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ChainTarget.ProtoReflect.Descriptor instead. 
-func (*ChainTarget) Descriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{5} -} - -func (x *ChainTarget) GetType() ChainTarget_TargetType { - if x != nil { - return x.Type - } - return ChainTarget_UNDEFINED -} - -func (x *ChainTarget) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Administrator-defined Attributes of the FrostFS Storage Node. -// -// `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8 -// string. Value can't be empty. -// -// Node's attributes are mostly used during Storage Policy evaluation to -// calculate object's placement and find a set of nodes satisfying policy -// requirements. There are some "well-known" node attributes common to all the -// Storage Nodes in the network and used implicitly with default values if not -// explicitly set: -// -// - Capacity \ -// Total available disk space in Gigabytes. -// - Price \ -// Price in GAS tokens for storing one GB of data during one Epoch. In node -// attributes it's a string presenting floating point number with comma or -// point delimiter for decimal part. In the Network Map it will be saved as -// 64-bit unsigned integer representing number of minimal token fractions. -// - Locode \ -// Node's geographic location in -// [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html) -// format approximated to the nearest point defined in standard. -// - Country \ -// Country code in -// [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) -// format. Calculated automatically from `Locode` attribute -// - Region \ -// Country's administative subdivision where node is located. Calculated -// automatically from `Locode` attribute based on `SubDiv` field. Presented -// in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format. -// - City \ -// City, town, village or rural area name where node is located written -// without diacritics . Calculated automatically from `Locode` attribute. -// -// For detailed description of each well-known attribute please see the -// corresponding section in FrostFS Technical specification. -type NodeInfo_Attribute struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key of the node attribute. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Value of the node attribute. - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // Parent keys, if any. For example for `City` it could be `Region` and - // `Country`. - Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"` -} - -func (x *NodeInfo_Attribute) Reset() { - *x = NodeInfo_Attribute{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_control_types_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeInfo_Attribute) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeInfo_Attribute) ProtoMessage() {} - -func (x *NodeInfo_Attribute) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_control_types_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeInfo_Attribute.ProtoReflect.Descriptor instead. 
-func (*NodeInfo_Attribute) Descriptor() ([]byte, []int) { - return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1, 0} -} - -func (x *NodeInfo_Attribute) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *NodeInfo_Attribute) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -func (x *NodeInfo_Attribute) GetParents() []string { - if x != nil { - return x.Parents - } - return nil -} - -var File_pkg_services_control_types_proto protoreflect.FileDescriptor - -var file_pkg_services_control_types_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x36, 0x0a, 0x09, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, - 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, - 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x3b, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x4d, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x47, 0x0a, 0x06, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x27, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, - 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, - 0x94, 0x02, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, - 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 
0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a, - 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74, - 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, - 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x63, 0x61, 0x63, 0x68, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61, 0x6d, 0x61, 0x5f, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61, - 0x6d, 0x61, 0x50, 0x61, 0x74, 0x68, 0x22, 0x36, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74, - 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa6, - 0x01, 0x0a, 0x0b, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x33, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x50, 0x41, 0x43, - 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x55, 0x53, 0x45, 0x52, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, - 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x04, 0x2a, 0x4e, 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x6d, 0x61, - 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x46, 0x46, - 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, - 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x2a, 0x6a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c, 0x54, - 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 
0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, - 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x11, 0x0a, - 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x03, - 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, - 0x47, 0x10, 0x04, 0x2a, 0x6a, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, - 0x12, 0x18, 0x0a, 0x14, 0x53, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, - 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, - 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, - 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, - 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x47, 0x52, 0x41, - 0x44, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x42, - 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, - 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, - 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, - 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_services_control_types_proto_rawDescOnce sync.Once - file_pkg_services_control_types_proto_rawDescData = file_pkg_services_control_types_proto_rawDesc -) - -func file_pkg_services_control_types_proto_rawDescGZIP() []byte { - file_pkg_services_control_types_proto_rawDescOnce.Do(func() { - file_pkg_services_control_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_types_proto_rawDescData) - }) - return file_pkg_services_control_types_proto_rawDescData -} - -var file_pkg_services_control_types_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_pkg_services_control_types_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_pkg_services_control_types_proto_goTypes = []interface{}{ - (NetmapStatus)(0), // 0: control.NetmapStatus - (HealthStatus)(0), // 1: control.HealthStatus - (ShardMode)(0), // 2: control.ShardMode - (ChainTarget_TargetType)(0), // 3: control.ChainTarget.TargetType - (*Signature)(nil), // 4: control.Signature - (*NodeInfo)(nil), // 5: control.NodeInfo - (*Netmap)(nil), // 6: control.Netmap - (*ShardInfo)(nil), // 7: control.ShardInfo - (*BlobstorInfo)(nil), // 8: control.BlobstorInfo - (*ChainTarget)(nil), // 9: control.ChainTarget - (*NodeInfo_Attribute)(nil), // 10: control.NodeInfo.Attribute -} -var file_pkg_services_control_types_proto_depIdxs = []int32{ - 10, // 0: control.NodeInfo.attributes:type_name -> control.NodeInfo.Attribute - 0, // 1: control.NodeInfo.state:type_name -> control.NetmapStatus - 5, // 2: control.Netmap.nodes:type_name -> control.NodeInfo - 8, // 3: control.ShardInfo.blobstor:type_name -> control.BlobstorInfo - 2, // 4: control.ShardInfo.mode:type_name -> control.ShardMode - 3, // 5: control.ChainTarget.type:type_name -> control.ChainTarget.TargetType - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { 
file_pkg_services_control_types_proto_init() } -func file_pkg_services_control_types_proto_init() { - if File_pkg_services_control_types_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pkg_services_control_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Signature); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Netmap); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BlobstorInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChainTarget); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_control_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeInfo_Attribute); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_services_control_types_proto_rawDesc, - NumEnums: 4, - NumMessages: 7, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pkg_services_control_types_proto_goTypes, - DependencyIndexes: file_pkg_services_control_types_proto_depIdxs, - EnumInfos: file_pkg_services_control_types_proto_enumTypes, - MessageInfos: file_pkg_services_control_types_proto_msgTypes, - }.Build() - File_pkg_services_control_types_proto = out.File - file_pkg_services_control_types_proto_rawDesc = nil - file_pkg_services_control_types_proto_goTypes = nil - file_pkg_services_control_types_proto_depIdxs = nil -} diff --git a/pkg/services/control/types.proto b/pkg/services/control/types.proto index 55636d88a..d8135ed64 100644 --- a/pkg/services/control/types.proto +++ b/pkg/services/control/types.proto @@ -142,6 +142,9 @@ message ShardInfo { // Path to shard's pilorama storage. string pilorama_path = 7 [ json_name = "piloramaPath" ]; + + // Evacuation status. + bool evacuation_in_progress = 8 [ json_name = "evacuationInProgress" ]; } // Blobstor component description. 
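Note on the two hunks above: the first deletes the protoc-gen-go output for `pkg/services/control/types.pb.go` wholesale (raw descriptor bytes, the gzip'd descriptor cache, and the `protoimpl.TypeBuilder` wiring with its message exporters), and the second adds field 8, `evacuation_in_progress`, to `ShardInfo` in `types.proto`. The replacement generated code in `types_frostfs.pb.go` below drops the reflection machinery in favor of plain structs with easyproto/easyjson codecs. A minimal sketch of the new field from a caller's perspective (the `control` import path is taken from this repository; the `main` scaffolding is illustrative only):

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	si := &control.ShardInfo{
		Shard_ID:             []byte{0x01, 0x02},
		Mode:                 control.ShardMode_READ_WRITE,
		EvacuationInProgress: true, // field 8, added by this patch
	}

	bin := si.MarshalProtobuf(nil) // stable binary encoding, no reflection
	js, _ := si.MarshalJSON()      // uses json_name: ..."evacuationInProgress":true...

	fmt.Printf("%d bytes on the wire, JSON: %s\n", len(bin), js)
}
```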
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go index 858d85a1c..69d87292d 100644 --- a/pkg/services/control/types_frostfs.pb.go +++ b/pkg/services/control/types_frostfs.pb.go @@ -2,7 +2,149 @@ package control -import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" +import ( + json "encoding/json" + fmt "fmt" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" + easyproto "github.com/VictoriaMetrics/easyproto" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" + strconv "strconv" +) + +type NetmapStatus int32 + +const ( + NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0 + NetmapStatus_ONLINE NetmapStatus = 1 + NetmapStatus_OFFLINE NetmapStatus = 2 + NetmapStatus_MAINTENANCE NetmapStatus = 3 +) + +var ( + NetmapStatus_name = map[int32]string{ + 0: "STATUS_UNDEFINED", + 1: "ONLINE", + 2: "OFFLINE", + 3: "MAINTENANCE", + } + NetmapStatus_value = map[string]int32{ + "STATUS_UNDEFINED": 0, + "ONLINE": 1, + "OFFLINE": 2, + "MAINTENANCE": 3, + } +) + +func (x NetmapStatus) String() string { + if v, ok := NetmapStatus_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *NetmapStatus) FromString(s string) bool { + if v, ok := NetmapStatus_value[s]; ok { + *x = NetmapStatus(v) + return true + } + return false +} + +type HealthStatus int32 + +const ( + HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0 + HealthStatus_STARTING HealthStatus = 1 + HealthStatus_READY HealthStatus = 2 + HealthStatus_SHUTTING_DOWN HealthStatus = 3 + HealthStatus_RECONFIGURING HealthStatus = 4 +) + +var ( + HealthStatus_name = map[int32]string{ + 0: "HEALTH_STATUS_UNDEFINED", + 1: "STARTING", + 2: "READY", + 3: "SHUTTING_DOWN", + 4: "RECONFIGURING", + } + HealthStatus_value = map[string]int32{ + "HEALTH_STATUS_UNDEFINED": 0, + "STARTING": 1, + "READY": 2, + "SHUTTING_DOWN": 3, + "RECONFIGURING": 4, + } +) + +func (x HealthStatus) String() string { + if v, ok := HealthStatus_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *HealthStatus) FromString(s string) bool { + if v, ok := HealthStatus_value[s]; ok { + *x = HealthStatus(v) + return true + } + return false +} + +type ShardMode int32 + +const ( + ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0 + ShardMode_READ_WRITE ShardMode = 1 + ShardMode_READ_ONLY ShardMode = 2 + ShardMode_DEGRADED ShardMode = 3 + ShardMode_DEGRADED_READ_ONLY ShardMode = 4 +) + +var ( + ShardMode_name = map[int32]string{ + 0: "SHARD_MODE_UNDEFINED", + 1: "READ_WRITE", + 2: "READ_ONLY", + 3: "DEGRADED", + 4: "DEGRADED_READ_ONLY", + } + ShardMode_value = map[string]int32{ + "SHARD_MODE_UNDEFINED": 0, + "READ_WRITE": 1, + "READ_ONLY": 2, + "DEGRADED": 3, + "DEGRADED_READ_ONLY": 4, + } +) + +func (x ShardMode) String() string { + if v, ok := ShardMode_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *ShardMode) FromString(s string) bool { + if v, ok := ShardMode_value[s]; ok { + *x = ShardMode(v) + return true + } + return false +} + +type Signature struct { + Key []byte `json:"key"` + Sign []byte `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*Signature)(nil) + _ encoding.ProtoUnmarshaler = (*Signature)(nil) + _ json.Marshaler = (*Signature)(nil) + _ json.Unmarshaler = (*Signature)(nil) +) // StableSize 
returns the size of x in protobuf format. // @@ -16,27 +158,186 @@ func (x *Signature) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *Signature) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.Key) - offset += proto.BytesMarshal(2, buf[offset:], x.Sign) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *Signature) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Key) != 0 { + mm.AppendBytes(1, x.Key) + } + if len(x.Sign) != 0 { + mm.AppendBytes(2, x.Sign) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *Signature) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "Signature") + } + switch fc.FieldNum { + case 1: // Key + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Key") + } + x.Key = data + case 2: // Sign + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Sign") + } + x.Sign = data + } + } + return nil +} +func (x *Signature) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} +func (x *Signature) SetKey(v []byte) { + x.Key = v +} +func (x *Signature) GetSign() []byte { + if x != nil { + return x.Sign + } + return nil +} +func (x *Signature) SetSign(v []byte) { + x.Sign = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *Signature) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + if x.Sign != nil { + out.Base64Bytes(x.Sign) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
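Before the matching JSON decoder below, a note on the protobuf side of this change: `StableMarshal`, which sized and filled a caller-supplied buffer, is replaced by `MarshalProtobuf`, which borrows a marshaler from `pool.MarshalerPool` to amortize allocations, and by a separate `EmitProtobuf` so that nested messages can write themselves into a parent's `easyproto.MessageMarshaler`. A round-trip sketch against the new API, written as a test assumed to live next to the package:

```go
package control_test

import (
	"bytes"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func TestSignatureRoundTrip(t *testing.T) {
	in := &control.Signature{Key: []byte{0x03}, Sign: []byte{0xde, 0xad}}

	// The pooled marshaler appends the encoding to dst and returns it.
	buf := in.MarshalProtobuf(nil)

	var out control.Signature
	if err := out.UnmarshalProtobuf(buf); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(in.Key, out.Key) || !bytes.Equal(in.Sign, out.Sign) {
		t.Fatal("round-trip mismatch")
	}
}
```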
+func (x *Signature) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Key = f + } + case "signature": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Sign = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type NodeInfo_Attribute struct { + Key string `json:"key"` + Value string `json:"value"` + Parents []string `json:"parents"` +} + +var ( + _ encoding.ProtoMarshaler = (*NodeInfo_Attribute)(nil) + _ encoding.ProtoUnmarshaler = (*NodeInfo_Attribute)(nil) + _ json.Marshaler = (*NodeInfo_Attribute)(nil) + _ json.Unmarshaler = (*NodeInfo_Attribute)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -50,28 +351,215 @@ func (x *NodeInfo_Attribute) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *NodeInfo_Attribute) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.StringMarshal(1, buf[offset:], x.Key) - offset += proto.StringMarshal(2, buf[offset:], x.Value) - offset += proto.RepeatedStringMarshal(3, buf[offset:], x.Parents) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *NodeInfo_Attribute) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *NodeInfo_Attribute) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Key) != 0 { + mm.AppendString(1, x.Key) + } + if len(x.Value) != 0 { + mm.AppendString(2, x.Value) + } + for j := range x.Parents { + mm.AppendString(3, x.Parents[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *NodeInfo_Attribute) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "NodeInfo_Attribute") + } + switch fc.FieldNum { + case 1: // Key + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Key") + } + x.Key = data + case 2: // Value + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Value") + } + x.Value = data + case 3: // Parents + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Parents") + } + x.Parents = append(x.Parents, data) + } + } + return nil +} +func (x *NodeInfo_Attribute) GetKey() string { + if x != nil { + return x.Key + } + return "" +} +func (x *NodeInfo_Attribute) SetKey(v string) { + x.Key = v +} +func (x *NodeInfo_Attribute) GetValue() string { + if x != nil { + return x.Value + } + return "" +} +func (x *NodeInfo_Attribute) SetValue(v string) { + x.Value = v +} +func (x *NodeInfo_Attribute) GetParents() []string { + if x != nil { + return x.Parents + } + return nil +} +func (x *NodeInfo_Attribute) SetParents(v []string) { + x.Parents = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *NodeInfo_Attribute) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + out.String(x.Key) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"value\":" + out.RawString(prefix) + out.String(x.Value) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parents\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Parents { + if i != 0 { + out.RawByte(',') + } + out.String(x.Parents[i]) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *NodeInfo_Attribute) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *NodeInfo_Attribute) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + { + var f string + f = in.String() + x.Key = f + } + case "value": + { + var f string + f = in.String() + x.Value = f + } + case "parents": + { + var f string + var list []string + in.Delim('[') + for !in.IsDelim(']') { + f = in.String() + list = append(list, f) + in.WantComma() + } + x.Parents = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type NodeInfo struct { + PublicKey []byte `json:"publicKey"` + Addresses []string `json:"addresses"` + Attributes []NodeInfo_Attribute `json:"attributes"` + State NetmapStatus `json:"state"` +} + +var ( + _ encoding.ProtoMarshaler = (*NodeInfo)(nil) + _ encoding.ProtoUnmarshaler = (*NodeInfo)(nil) + _ json.Marshaler = (*NodeInfo)(nil) + _ json.Unmarshaler = (*NodeInfo)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -82,37 +570,303 @@ func (x *NodeInfo) StableSize() (size int) { size += proto.BytesSize(1, x.PublicKey) size += proto.RepeatedStringSize(2, x.Addresses) for i := range x.Attributes { - size += proto.NestedStructureSize(3, x.Attributes[i]) + size += proto.NestedStructureSizeUnchecked(3, &x.Attributes[i]) } size += proto.EnumSize(4, int32(x.State)) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *NodeInfo) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.PublicKey) - offset += proto.RepeatedStringMarshal(2, buf[offset:], x.Addresses) - for i := range x.Attributes { - offset += proto.NestedStructureMarshal(3, buf[offset:], x.Attributes[i]) - } - offset += proto.EnumMarshal(4, buf[offset:], int32(x.State)) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *NodeInfo) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.PublicKey) != 0 { + mm.AppendBytes(1, x.PublicKey) + } + for j := range x.Addresses { + mm.AppendString(2, x.Addresses[j]) + } + for i := range x.Attributes { + x.Attributes[i].EmitProtobuf(mm.AppendMessage(3)) + } + if int32(x.State) != 0 { + mm.AppendInt32(4, int32(x.State)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
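Worth flagging in the `NodeInfo` definition above: nested attributes are now held as a value slice (`[]NodeInfo_Attribute`, previously a slice of pointers in the protoc output), and sizing goes through `proto.NestedStructureSizeUnchecked(3, &x.Attributes[i])`. The decoder that follows grows the slice with a zero element and unmarshals in place through a pointer to it, which avoids one heap allocation per attribute. A usage sketch of the value-slice shape (illustrative field values):

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	n := &control.NodeInfo{
		PublicKey: []byte{0x02, 0xff},
		Addresses: []string{"/dns4/node1/tcp/8080"},
		// Values, not pointers, after this patch.
		Attributes: []control.NodeInfo_Attribute{
			{Key: "Capacity", Value: "100"},
		},
		State: control.NetmapStatus_ONLINE,
	}
	fmt.Println(len(n.MarshalProtobuf(nil)), "bytes")
}
```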
+func (x *NodeInfo) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "NodeInfo") + } + switch fc.FieldNum { + case 1: // PublicKey + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "PublicKey") + } + x.PublicKey = data + case 2: // Addresses + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Addresses") + } + x.Addresses = append(x.Addresses, data) + case 3: // Attributes + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Attributes") + } + x.Attributes = append(x.Attributes, NodeInfo_Attribute{}) + ff := &x.Attributes[len(x.Attributes)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 4: // State + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "State") + } + x.State = NetmapStatus(data) + } + } + return nil +} +func (x *NodeInfo) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} +func (x *NodeInfo) SetPublicKey(v []byte) { + x.PublicKey = v +} +func (x *NodeInfo) GetAddresses() []string { + if x != nil { + return x.Addresses + } + return nil +} +func (x *NodeInfo) SetAddresses(v []string) { + x.Addresses = v +} +func (x *NodeInfo) GetAttributes() []NodeInfo_Attribute { + if x != nil { + return x.Attributes + } + return nil +} +func (x *NodeInfo) SetAttributes(v []NodeInfo_Attribute) { + x.Attributes = v +} +func (x *NodeInfo) GetState() NetmapStatus { + if x != nil { + return x.State + } + return 0 +} +func (x *NodeInfo) SetState(v NetmapStatus) { + x.State = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *NodeInfo) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"publicKey\":" + out.RawString(prefix) + if x.PublicKey != nil { + out.Base64Bytes(x.PublicKey) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"addresses\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Addresses { + if i != 0 { + out.RawByte(',') + } + out.String(x.Addresses[i]) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"attributes\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Attributes { + if i != 0 { + out.RawByte(',') + } + x.Attributes[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"state\":" + out.RawString(prefix) + v := int32(x.State) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *NodeInfo) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "publicKey": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.PublicKey = f + } + case "addresses": + { + var f string + var list []string + in.Delim('[') + for !in.IsDelim(']') { + f = in.String() + list = append(list, f) + in.WantComma() + } + x.Addresses = list + in.Delim(']') + } + case "attributes": + { + var f NodeInfo_Attribute + var list []NodeInfo_Attribute + in.Delim('[') + for !in.IsDelim(']') { + f = NodeInfo_Attribute{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Attributes = list + in.Delim(']') + } + case "state": + { + var f NetmapStatus + var parsedValue NetmapStatus + switch v := in.Interface().(type) { + case string: + if vv, ok := NetmapStatus_value[v]; ok { + parsedValue = NetmapStatus(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = NetmapStatus(vv) + case float64: + parsedValue = NetmapStatus(v) + } + f = parsedValue + x.State = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type Netmap struct { + Epoch uint64 `json:"epoch"` + Nodes []NodeInfo `json:"nodes"` +} + +var ( + _ encoding.ProtoMarshaler = (*Netmap)(nil) + _ encoding.ProtoUnmarshaler = (*Netmap)(nil) + _ json.Marshaler = (*Netmap)(nil) + _ json.Unmarshaler = (*Netmap)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -122,34 +876,205 @@ func (x *Netmap) StableSize() (size int) { } size += proto.UInt64Size(1, x.Epoch) for i := range x.Nodes { - size += proto.NestedStructureSize(2, x.Nodes[i]) + size += proto.NestedStructureSizeUnchecked(2, &x.Nodes[i]) } return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *Netmap) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt64Marshal(1, buf[offset:], x.Epoch) - for i := range x.Nodes { - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Nodes[i]) - } - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *Netmap) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Epoch != 0 { + mm.AppendUint64(1, x.Epoch) + } + for i := range x.Nodes { + x.Nodes[i].EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
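Before the `Netmap` decoder below, a note on the enum handling just above: `NodeInfo.State` is marshaled as the symbolic name when it is known (falling back to the number otherwise), and the decoder's `in.Interface()` switch accepts either form, so both spellings parse to the same value. A test-style sketch:

```go
package control_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func TestNodeInfoStateJSONForms(t *testing.T) {
	var byName, byNumber control.NodeInfo
	if err := byName.UnmarshalJSON([]byte(`{"state":"ONLINE"}`)); err != nil {
		t.Fatal(err)
	}
	if err := byNumber.UnmarshalJSON([]byte(`{"state":1}`)); err != nil {
		t.Fatal(err)
	}
	if byName.State != control.NetmapStatus_ONLINE || byName.State != byNumber.State {
		t.Fatal("symbolic and numeric enum forms should decode identically")
	}
}
```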
+func (x *Netmap) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "Netmap") + } + switch fc.FieldNum { + case 1: // Epoch + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Epoch") + } + x.Epoch = data + case 2: // Nodes + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Nodes") + } + x.Nodes = append(x.Nodes, NodeInfo{}) + ff := &x.Nodes[len(x.Nodes)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *Netmap) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} +func (x *Netmap) SetEpoch(v uint64) { + x.Epoch = v +} +func (x *Netmap) GetNodes() []NodeInfo { + if x != nil { + return x.Nodes + } + return nil +} +func (x *Netmap) SetNodes(v []NodeInfo) { + x.Nodes = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *Netmap) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"epoch\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodes\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Nodes { + if i != 0 { + out.RawByte(',') + } + x.Nodes[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
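One detail of the `Netmap` JSON codec around this point: the `uint64` epoch is emitted as a quoted decimal string (`strconv.AppendUint` between raw `"` bytes), presumably so 64-bit values survive JavaScript consumers that lose integer precision past 2^53; the decoder that follows reads it back through `in.JsonNumber()`, which tolerates the quoted form. A round-trip sketch:

```go
package control_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func TestNetmapEpochJSON(t *testing.T) {
	nm := &control.Netmap{Epoch: 1 << 60} // larger than a JS Number holds exactly

	js, err := nm.MarshalJSON() // {"epoch":"1152921504606846976","nodes":[]}
	if err != nil {
		t.Fatal(err)
	}

	var back control.Netmap
	if err := back.UnmarshalJSON(js); err != nil {
		t.Fatal(err)
	}
	if back.Epoch != nm.Epoch {
		t.Fatalf("epoch mismatch: %d != %d", back.Epoch, nm.Epoch)
	}
}
```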
+func (x *Netmap) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "epoch": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.Epoch = f + } + case "nodes": + { + var f NodeInfo + var list []NodeInfo + in.Delim('[') + for !in.IsDelim(']') { + f = NodeInfo{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Nodes = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ShardInfo struct { + Shard_ID []byte `json:"shardID"` + MetabasePath string `json:"metabasePath"` + Blobstor []BlobstorInfo `json:"blobstor"` + WritecachePath string `json:"writecachePath"` + Mode ShardMode `json:"mode"` + ErrorCount uint32 `json:"errorCount"` + PiloramaPath string `json:"piloramaPath"` + EvacuationInProgress bool `json:"evacuationInProgress"` +} + +var ( + _ encoding.ProtoMarshaler = (*ShardInfo)(nil) + _ encoding.ProtoUnmarshaler = (*ShardInfo)(nil) + _ json.Marshaler = (*ShardInfo)(nil) + _ json.Unmarshaler = (*ShardInfo)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -160,43 +1085,437 @@ func (x *ShardInfo) StableSize() (size int) { size += proto.BytesSize(1, x.Shard_ID) size += proto.StringSize(2, x.MetabasePath) for i := range x.Blobstor { - size += proto.NestedStructureSize(3, x.Blobstor[i]) + size += proto.NestedStructureSizeUnchecked(3, &x.Blobstor[i]) } size += proto.StringSize(4, x.WritecachePath) size += proto.EnumSize(5, int32(x.Mode)) size += proto.UInt32Size(6, x.ErrorCount) size += proto.StringSize(7, x.PiloramaPath) + size += proto.BoolSize(8, x.EvacuationInProgress) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ShardInfo) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID) - offset += proto.StringMarshal(2, buf[offset:], x.MetabasePath) - for i := range x.Blobstor { - offset += proto.NestedStructureMarshal(3, buf[offset:], x.Blobstor[i]) - } - offset += proto.StringMarshal(4, buf[offset:], x.WritecachePath) - offset += proto.EnumMarshal(5, buf[offset:], int32(x.Mode)) - offset += proto.UInt32Marshal(6, buf[offset:], x.ErrorCount) - offset += proto.StringMarshal(7, buf[offset:], x.PiloramaPath) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *ShardInfo) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Shard_ID) != 0 { + mm.AppendBytes(1, x.Shard_ID) + } + if len(x.MetabasePath) != 0 { + mm.AppendString(2, x.MetabasePath) + } + for i := range x.Blobstor { + x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3)) + } + if len(x.WritecachePath) != 0 { + mm.AppendString(4, x.WritecachePath) + } + if int32(x.Mode) != 0 { + mm.AppendInt32(5, int32(x.Mode)) + } + if x.ErrorCount != 0 { + mm.AppendUint32(6, x.ErrorCount) + } + if len(x.PiloramaPath) != 0 { + mm.AppendString(7, x.PiloramaPath) + } + if x.EvacuationInProgress { + mm.AppendBool(8, x.EvacuationInProgress) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ShardInfo") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = data + case 2: // MetabasePath + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "MetabasePath") + } + x.MetabasePath = data + case 3: // Blobstor + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Blobstor") + } + x.Blobstor = append(x.Blobstor, BlobstorInfo{}) + ff := &x.Blobstor[len(x.Blobstor)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 4: // WritecachePath + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "WritecachePath") + } + x.WritecachePath = data + case 5: // Mode + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Mode") + } + x.Mode = ShardMode(data) + case 6: // ErrorCount + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ErrorCount") + } + x.ErrorCount = data + case 7: // PiloramaPath + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "PiloramaPath") + } + x.PiloramaPath = data + case 8: // EvacuationInProgress + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "EvacuationInProgress") + } + x.EvacuationInProgress = data + } + } + return nil +} +func (x *ShardInfo) GetShard_ID() []byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *ShardInfo) SetShard_ID(v []byte) { + x.Shard_ID = v +} +func (x *ShardInfo) GetMetabasePath() string { + if x != nil { + return x.MetabasePath + } + return "" +} +func (x *ShardInfo) SetMetabasePath(v string) { + x.MetabasePath = v +} +func (x *ShardInfo) GetBlobstor() []BlobstorInfo { + if x != nil { + return x.Blobstor + } + return nil +} +func (x *ShardInfo) SetBlobstor(v []BlobstorInfo) { + x.Blobstor = v +} +func (x *ShardInfo) GetWritecachePath() string { + if x != nil { + return x.WritecachePath + } + return "" +} +func (x *ShardInfo) SetWritecachePath(v string) { + x.WritecachePath = v +} +func (x *ShardInfo) GetMode() ShardMode { + if x != nil { + return x.Mode + } + return 0 +} +func (x *ShardInfo) SetMode(v ShardMode) { + x.Mode = v +} +func (x *ShardInfo) GetErrorCount() uint32 { 
+ if x != nil { + return x.ErrorCount + } + return 0 +} +func (x *ShardInfo) SetErrorCount(v uint32) { + x.ErrorCount = v +} +func (x *ShardInfo) GetPiloramaPath() string { + if x != nil { + return x.PiloramaPath + } + return "" +} +func (x *ShardInfo) SetPiloramaPath(v string) { + x.PiloramaPath = v +} +func (x *ShardInfo) GetEvacuationInProgress() bool { + if x != nil { + return x.EvacuationInProgress + } + return false +} +func (x *ShardInfo) SetEvacuationInProgress(v bool) { + x.EvacuationInProgress = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ShardInfo) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + if x.Shard_ID != nil { + out.Base64Bytes(x.Shard_ID) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"metabasePath\":" + out.RawString(prefix) + out.String(x.MetabasePath) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"blobstor\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Blobstor { + if i != 0 { + out.RawByte(',') + } + x.Blobstor[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"writecachePath\":" + out.RawString(prefix) + out.String(x.WritecachePath) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"mode\":" + out.RawString(prefix) + v := int32(x.Mode) + if vv, ok := ShardMode_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"errorCount\":" + out.RawString(prefix) + out.Uint32(x.ErrorCount) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"piloramaPath\":" + out.RawString(prefix) + out.String(x.PiloramaPath) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"evacuationInProgress\":" + out.RawString(prefix) + out.Bool(x.EvacuationInProgress) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
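Before the `ShardInfo` JSON decoder below, note the asymmetry in how the new flag is encoded: `EmitProtobuf` writes field 8 only when `EvacuationInProgress` is true (standard proto3 zero-value elision, so the wire form of a non-evacuating shard is unchanged by this patch), while `MarshalEasyJSON` writes the `"evacuationInProgress"` key unconditionally, even for `false`. A sketch of the contrast:

```go
package control_test

import (
	"strings"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func TestEvacuationFlagEncoding(t *testing.T) {
	off := new(control.ShardInfo) // flag false (zero value)
	on := &control.ShardInfo{EvacuationInProgress: true}

	// Protobuf: field 8 only appears when the flag is set.
	if len(off.MarshalProtobuf(nil)) >= len(on.MarshalProtobuf(nil)) {
		t.Fatal("expected the set flag to add bytes on the wire")
	}

	// JSON: the key is always present.
	js, err := off.MarshalJSON()
	if err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(string(js), `"evacuationInProgress":false`) {
		t.Fatalf("flag key missing from %s", js)
	}
}
```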
+func (x *ShardInfo) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Shard_ID = f + } + case "metabasePath": + { + var f string + f = in.String() + x.MetabasePath = f + } + case "blobstor": + { + var f BlobstorInfo + var list []BlobstorInfo + in.Delim('[') + for !in.IsDelim(']') { + f = BlobstorInfo{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Blobstor = list + in.Delim(']') + } + case "writecachePath": + { + var f string + f = in.String() + x.WritecachePath = f + } + case "mode": + { + var f ShardMode + var parsedValue ShardMode + switch v := in.Interface().(type) { + case string: + if vv, ok := ShardMode_value[v]; ok { + parsedValue = ShardMode(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = ShardMode(vv) + case float64: + parsedValue = ShardMode(v) + } + f = parsedValue + x.Mode = f + } + case "errorCount": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.ErrorCount = f + } + case "piloramaPath": + { + var f string + f = in.String() + x.PiloramaPath = f + } + case "evacuationInProgress": + { + var f bool + f = in.Bool() + x.EvacuationInProgress = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type BlobstorInfo struct { + Path string `json:"path"` + Type string `json:"type"` +} + +var ( + _ encoding.ProtoMarshaler = (*BlobstorInfo)(nil) + _ encoding.ProtoUnmarshaler = (*BlobstorInfo)(nil) + _ json.Marshaler = (*BlobstorInfo)(nil) + _ json.Unmarshaler = (*BlobstorInfo)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -209,27 +1528,206 @@ func (x *BlobstorInfo) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *BlobstorInfo) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.StringMarshal(1, buf[offset:], x.Path) - offset += proto.StringMarshal(2, buf[offset:], x.Type) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *BlobstorInfo) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *BlobstorInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Path) != 0 { + mm.AppendString(1, x.Path) + } + if len(x.Type) != 0 { + mm.AppendString(2, x.Type) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *BlobstorInfo) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "BlobstorInfo") + } + switch fc.FieldNum { + case 1: // Path + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Path") + } + x.Path = data + case 2: // Type + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Type") + } + x.Type = data + } + } + return nil +} +func (x *BlobstorInfo) GetPath() string { + if x != nil { + return x.Path + } + return "" +} +func (x *BlobstorInfo) SetPath(v string) { + x.Path = v +} +func (x *BlobstorInfo) GetType() string { + if x != nil { + return x.Type + } + return "" +} +func (x *BlobstorInfo) SetType(v string) { + x.Type = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *BlobstorInfo) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"path\":" + out.RawString(prefix) + out.String(x.Path) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"type\":" + out.RawString(prefix) + out.String(x.Type) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *BlobstorInfo) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *BlobstorInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "path": + { + var f string + f = in.String() + x.Path = f + } + case "type": + { + var f string + f = in.String() + x.Type = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ChainTarget_TargetType int32 + +const ( + ChainTarget_UNDEFINED ChainTarget_TargetType = 0 + ChainTarget_NAMESPACE ChainTarget_TargetType = 1 + ChainTarget_CONTAINER ChainTarget_TargetType = 2 + ChainTarget_USER ChainTarget_TargetType = 3 + ChainTarget_GROUP ChainTarget_TargetType = 4 +) + +var ( + ChainTarget_TargetType_name = map[int32]string{ + 0: "UNDEFINED", + 1: "NAMESPACE", + 2: "CONTAINER", + 3: "USER", + 4: "GROUP", + } + ChainTarget_TargetType_value = map[string]int32{ + "UNDEFINED": 0, + "NAMESPACE": 1, + "CONTAINER": 2, + "USER": 3, + "GROUP": 4, + } +) + +func (x ChainTarget_TargetType) String() string { + if v, ok := ChainTarget_TargetType_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *ChainTarget_TargetType) FromString(s string) bool { + if v, ok := ChainTarget_TargetType_value[s]; ok { + *x = ChainTarget_TargetType(v) + return true + } + return false +} + +type ChainTarget struct { + Type ChainTarget_TargetType `json:"type"` + Name string `json:"Name"` +} + +var ( + _ encoding.ProtoMarshaler = (*ChainTarget)(nil) + _ encoding.ProtoUnmarshaler = (*ChainTarget)(nil) + _ json.Marshaler = (*ChainTarget)(nil) + _ json.Unmarshaler = (*ChainTarget)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -242,23 +1740,170 @@ func (x *ChainTarget) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ChainTarget) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.EnumMarshal(1, buf[offset:], int32(x.Type)) - offset += proto.StringMarshal(2, buf[offset:], x.Name) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ChainTarget) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ChainTarget) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if int32(x.Type) != 0 { + mm.AppendInt32(1, int32(x.Type)) + } + if len(x.Name) != 0 { + mm.AppendString(2, x.Name) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
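A note on the enum pattern introduced above, before the `ChainTarget` protobuf decoder that follows: each generated enum now carries a plain `String()` plus a `FromString(string) bool`, replacing the protoreflect descriptor machinery; the bool return makes the name lookup convenient for parsing user-facing input (CLI flags would be one plausible caller, though that is an assumption). A small sketch:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	var tt control.ChainTarget_TargetType
	if !tt.FromString("CONTAINER") {
		fmt.Println("unknown target type") // FromString reports failure via bool
	}
	fmt.Println(tt)                                 // CONTAINER
	fmt.Println(control.ChainTarget_TargetType(42)) // 42 — String falls back to decimal
}
```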
+func (x *ChainTarget) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ChainTarget") + } + switch fc.FieldNum { + case 1: // Type + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Type") + } + x.Type = ChainTarget_TargetType(data) + case 2: // Name + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Name") + } + x.Name = data + } + } + return nil +} +func (x *ChainTarget) GetType() ChainTarget_TargetType { + if x != nil { + return x.Type + } + return 0 +} +func (x *ChainTarget) SetType(v ChainTarget_TargetType) { + x.Type = v +} +func (x *ChainTarget) GetName() string { + if x != nil { + return x.Name + } + return "" +} +func (x *ChainTarget) SetName(v string) { + x.Name = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ChainTarget) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"type\":" + out.RawString(prefix) + v := int32(x.Type) + if vv, ok := ChainTarget_TargetType_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"Name\":" + out.RawString(prefix) + out.String(x.Name) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ChainTarget) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ChainTarget) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "type": + { + var f ChainTarget_TargetType + var parsedValue ChainTarget_TargetType + switch v := in.Interface().(type) { + case string: + if vv, ok := ChainTarget_TargetType_value[v]; ok { + parsedValue = ChainTarget_TargetType(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = ChainTarget_TargetType(vv) + case float64: + parsedValue = ChainTarget_TargetType(v) + } + f = parsedValue + x.Type = f + } + case "Name": + { + var f string + f = in.String() + x.Name = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } } diff --git a/pkg/services/control/types_test.go b/pkg/services/control/types_test.go deleted file mode 100644 index df0cdf141..000000000 --- a/pkg/services/control/types_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package control_test - -import ( - "bytes" - "path/filepath" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "github.com/google/uuid" -) - -func TestNetmap_StableMarshal(t *testing.T) { - testStableMarshal(t, generateNetmap(), 
new(control.Netmap), func(m1, m2 protoMessage) bool { - return equalNetmaps(m1.(*control.Netmap), m2.(*control.Netmap)) - }) -} - -func generateNetmap() *control.Netmap { - nm := new(control.Netmap) - nm.SetEpoch(13) - - const nodeCount = 2 - - nodes := make([]*control.NodeInfo, 0, nodeCount) - - for i := 0; i < nodeCount; i++ { - n := new(control.NodeInfo) - n.SetPublicKey(testData(33)) - n.SetAddresses([]string{testString(), testString()}) - n.SetState(control.NetmapStatus_ONLINE) - - const attrCount = 2 - - attrs := make([]*control.NodeInfo_Attribute, 0, attrCount) - - for j := 0; j < attrCount; j++ { - a := new(control.NodeInfo_Attribute) - a.SetKey(testString()) - a.SetValue(testString()) - - const parentsCount = 2 - - parents := make([]string, 0, parentsCount) - - for k := 0; k < parentsCount; k++ { - parents = append(parents, testString()) - } - - a.SetParents(parents) - - attrs = append(attrs, a) - } - - n.SetAttributes(attrs) - - nodes = append(nodes, n) - } - - nm.SetNodes(nodes) - - return nm -} - -func equalNetmaps(nm1, nm2 *control.Netmap) bool { - if nm1.GetEpoch() != nm2.GetEpoch() { - return false - } - - n1, n2 := nm1.GetNodes(), nm2.GetNodes() - - if len(n1) != len(n2) { - return false - } - - for i := range n1 { - if !equalNodeInfos(n1[i], n2[i]) { - return false - } - } - - return true -} - -func equalNodeInfos(n1, n2 *control.NodeInfo) bool { - if !bytes.Equal(n1.GetPublicKey(), n2.GetPublicKey()) || - n1.GetState() != n2.GetState() { - return false - } - - na1, na2 := n1.GetAddresses(), n2.GetAddresses() - - if len(na1) != len(na2) { - return false - } - - for i := range na1 { - if na1[i] != na2[i] { - return false - } - } - - a1, a2 := n1.GetAttributes(), n2.GetAttributes() - - if len(a1) != len(a2) { - return false - } - - for i := range a1 { - if a1[i].GetKey() != a2[i].GetKey() || a1[i].GetValue() != a2[i].GetValue() { - return false - } - - p1, p2 := a1[i].GetParents(), a2[i].GetParents() - - if len(p1) != len(p2) { - return false - } - - for j := range p1 { - if p1[j] != p2[j] { - return false - } - } - } - - return true -} - -func generateShardInfo(id int) *control.ShardInfo { - si := new(control.ShardInfo) - - path := "/nice/dir/awesome/files/" + strconv.Itoa(id) - - uid, _ := uuid.NewRandom() - bin, _ := uid.MarshalBinary() - - si.SetID(bin) - si.SetMode(control.ShardMode_READ_WRITE) - si.SetMetabasePath(filepath.Join(path, "meta")) - si.Blobstor = []*control.BlobstorInfo{ - {Type: fstree.Type, Path: filepath.Join(path, "fstree")}, - {Type: blobovniczatree.Type, Path: filepath.Join(path, "blobtree")}, - } - si.SetWriteCachePath(filepath.Join(path, "writecache")) - si.SetPiloramaPath(filepath.Join(path, "pilorama")) - - return si -} diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 496b07a98..1b92fdaad 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -5,10 +5,11 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" versionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" ) @@ -26,9 +27,9 @@ type executorSvc struct { // 
NodeState encapsulates information // about current node state. type NodeState interface { - // Must return current node state + // LocalNodeInfo must return current node state // in FrostFS API v2 NodeInfo structure. - LocalNodeInfo() (*netmap.NodeInfo, error) + LocalNodeInfo() *netmapSDK.NodeInfo // ReadCurrentNetMap reads current local network map of the storage node // into the given parameter. Returns any error encountered which prevented @@ -39,17 +40,19 @@ type NodeState interface { // NetworkInfo encapsulates source of the // recent information about the FrostFS network. type NetworkInfo interface { - // Must return recent network information in FrostFS API v2 NetworkInfo structure. + // Dump must return recent network information in FrostFS API v2 NetworkInfo structure. // // If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset. - Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error) + Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error) } func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server { - if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil { - // this should never happen, otherwise it programmers bug - panic("can't create netmap execution service") - } + // this should never happen, otherwise it's a programmer's bug + msg := "BUG: can't create netmap execution service" + assert.False(s == nil, msg, "node state is nil") + assert.False(netInfo == nil, msg, "network info is nil") + assert.False(respSvc == nil, msg, "response service is nil") + assert.True(version.IsValid(v), msg, "invalid version") res := &executorSvc{ state: s, @@ -64,39 +67,15 @@ func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, func (s *executorSvc) LocalNodeInfo( _ context.Context, - req *netmap.LocalNodeInfoRequest, + _ *netmap.LocalNodeInfoRequest, ) (*netmap.LocalNodeInfoResponse, error) { - verV2 := req.GetMetaHeader().GetVersion() - if verV2 == nil { - return nil, errors.New("missing version") - } - - var ver versionsdk.Version - if err := ver.ReadFromV2(*verV2); err != nil { - return nil, fmt.Errorf("can't read version: %w", err) - } - - ni, err := s.state.LocalNodeInfo() - if err != nil { - return nil, err - } - - if addrNum := ni.NumberOfAddresses(); addrNum > 0 && ver.Minor() <= 7 { - ni2 := new(netmap.NodeInfo) - ni2.SetPublicKey(ni.GetPublicKey()) - ni2.SetState(ni.GetState()) - ni2.SetAttributes(ni.GetAttributes()) - ni.IterateAddresses(func(s string) bool { - ni2.SetAddresses(s) - return true - }) - - ni = ni2 - } + ni := s.state.LocalNodeInfo() + var nodeInfo netmap.NodeInfo + ni.WriteToV2(&nodeInfo) body := new(netmap.LocalNodeInfoResponseBody) body.SetVersion(&s.version) - body.SetNodeInfo(ni) + body.SetNodeInfo(&nodeInfo) resp := new(netmap.LocalNodeInfoResponse) resp.SetBody(body) @@ -106,7 +85,7 @@ func (s *executorSvc) LocalNodeInfo( } func (s *executorSvc) NetworkInfo( - _ context.Context, + ctx context.Context, req *netmap.NetworkInfoRequest, ) (*netmap.NetworkInfoResponse, error) { verV2 := req.GetMetaHeader().GetVersion() @@ -119,7 +98,7 @@ func (s *executorSvc) NetworkInfo( return nil, fmt.Errorf("can't read version: %w", err) } - ni, err := s.netInfo.Dump(ver) + ni, err := s.netInfo.Dump(ctx, ver) if err != nil { return nil, err } diff --git a/pkg/services/netmap/server.go b/pkg/services/netmap/server.go index 0a09c9f44..eff880dbe 100644 --- a/pkg/services/netmap/server.go +++ b/pkg/services/netmap/server.go @@ 
-3,7 +3,7 @@ package netmap import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" ) // Server is an interface of the FrostFS API Netmap service server. diff --git a/pkg/services/netmap/sign.go b/pkg/services/netmap/sign.go index 9a16ad8f1..5f184d5c0 100644 --- a/pkg/services/netmap/sign.go +++ b/pkg/services/netmap/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" ) type signService struct { diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go deleted file mode 100644 index 921545c8b..000000000 --- a/pkg/services/object/acl/acl.go +++ /dev/null @@ -1,262 +0,0 @@ -package acl - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "errors" - "fmt" - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" - bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -// Checker implements v2.ACLChecker interfaces and provides -// ACL/eACL validation functionality. -type Checker struct { - eaclSrc container.EACLSource - validator *eaclSDK.Validator - localStorage *engine.StorageEngine - state netmap.State -} - -type localStorage struct { - ls *engine.StorageEngine -} - -func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - if s.ls == nil { - return nil, io.ErrUnexpectedEOF - } - - return engine.Head(ctx, s.ls, addr) -} - -// Various EACL check errors. -var ( - errEACLDeniedByRule = errors.New("denied by rule") - errBearerExpired = errors.New("bearer token has expired") - errBearerInvalidSignature = errors.New("bearer token has invalid signature") - errBearerInvalidContainerID = errors.New("bearer token was created for another container") - errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner") - errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender") -) - -// NewChecker creates Checker. -// Panics if at least one of the parameter is nil. -func NewChecker( - state netmap.State, - eaclSrc container.EACLSource, - validator *eaclSDK.Validator, - localStorage *engine.StorageEngine, -) *Checker { - return &Checker{ - eaclSrc: eaclSrc, - validator: validator, - localStorage: localStorage, - state: state, - } -} - -// CheckBasicACL is a main check function for basic ACL. 
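// --- Illustrative sketch (editor's addition, not part of the diff) ---
// The removed Checker answers CheckBasicACL purely from the container's basic
// ACL bitmask: info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole()).
// A minimal, self-contained model of that idea, using hypothetical toy types
// rather than the real frostfs-sdk-go acl package:
package sketch

type Op uint8
type Role uint8

const (
	OpGet Op = iota
	OpPut
)

const (
	RoleOwner Role = iota
	RoleSystem
	RoleOthers
	roleCount
)

// Basic is a toy stand-in for acl.Basic: one permission bit per (op, role) pair.
type Basic uint32

func bit(op Op, role Role) Basic { return 1 << (uint(op)*uint(roleCount) + uint(role)) }

// Allow grants the operation to the role.
func (b *Basic) Allow(op Op, role Role) { *b |= bit(op, role) }

// IsOpAllowed reports whether the role may perform the operation.
func (b Basic) IsOpAllowed(op Op, role Role) bool { return b&bit(op, role) != 0 }
// --- end sketch ---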
-func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool { - // check basic ACL permissions - return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole()) -} - -// StickyBitCheck validates owner field in the request if sticky bit is enabled. -func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool { - // According to FrostFS specification sticky bit has no effect on system nodes - // for correct intra-container work with objects (in particular, replication). - if info.RequestRole() == acl.RoleContainer { - return true - } - - if !info.BasicACL().Sticky() { - return true - } - - if len(info.SenderKey()) == 0 { - return false - } - - requestSenderKey := unmarshalPublicKey(info.SenderKey()) - - return isOwnerFromKey(owner, requestSenderKey) -} - -// CheckEACL is a main check function for extended ACL. -func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error { - basicACL := reqInfo.BasicACL() - if !basicACL.Extendable() { - return nil - } - - bearerTok := reqInfo.Bearer() - impersonate := bearerTok != nil && bearerTok.Impersonate() - - // if bearer token is not allowed, then ignore it - if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) { - reqInfo.CleanBearer() - } - - var table eaclSDK.Table - cnr := reqInfo.ContainerID() - - if bearerTok == nil { - eaclInfo, err := c.eaclSrc.GetEACL(cnr) - if err != nil { - if client.IsErrEACLNotFound(err) { - return nil - } - return err - } - - table = *eaclInfo.Value - } else { - table = bearerTok.EACLTable() - } - - // if bearer token is not present, isValidBearer returns true - if err := isValidBearer(reqInfo, c.state); err != nil { - return err - } - - hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo) - if err != nil { - return err - } - - eaclRole := getRole(reqInfo) - - action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit). - WithRole(eaclRole). - WithOperation(eaclSDK.Operation(reqInfo.Operation())). - WithContainerID(&cnr). - WithSenderKey(reqInfo.SenderKey()). - WithHeaderSource(hdrSrc). - WithEACLTable(&table), - ) - - if action != eaclSDK.ActionAllow { - return errEACLDeniedByRule - } - return nil -} - -func getRole(reqInfo v2.RequestInfo) eaclSDK.Role { - var eaclRole eaclSDK.Role - switch op := reqInfo.RequestRole(); op { - default: - eaclRole = eaclSDK.Role(op) - case acl.RoleOwner: - eaclRole = eaclSDK.RoleUser - case acl.RoleInnerRing, acl.RoleContainer: - eaclRole = eaclSDK.RoleSystem - case acl.RoleOthers: - eaclRole = eaclSDK.RoleOthers - } - return eaclRole -} - -func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) { - var xHeaderSource eaclV2.XHeaderSource - if req, ok := msg.(eaclV2.Request); ok { - xHeaderSource = eaclV2.NewRequestXHeaderSource(req) - } else { - xHeaderSource = eaclV2.NewResponseXHeaderSource(msg.(eaclV2.Response), reqInfo.Request().(eaclV2.Request)) - } - - hdrSrc, err := eaclV2.NewMessageHeaderSource(&localStorage{ls: c.localStorage}, xHeaderSource, cnr, eaclV2.WithOID(reqInfo.ObjectID())) - if err != nil { - return nil, fmt.Errorf("can't parse headers: %w", err) - } - return hdrSrc, nil -} - -// isValidBearer checks whether bearer token was correctly signed by authorized -// entity. This method might be defined on whole ACL service because it will -// require fetching current epoch to check lifetime. -func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error { - ownerCnr := reqInfo.ContainerOwner() - - token := reqInfo.Bearer() - - // 0. 
Check if bearer token is present in reqInfo. - if token == nil { - return nil - } - - // 1. First check token lifetime. Simplest verification. - if token.InvalidAt(st.CurrentEpoch()) { - return errBearerExpired - } - - // 2. Then check if bearer token is signed correctly. - if !token.VerifySignature() { - return errBearerInvalidSignature - } - - // 3. Then check if container is either empty or equal to the container in the request. - cnr, isSet := token.EACLTable().CID() - if isSet && !cnr.Equals(reqInfo.ContainerID()) { - return errBearerInvalidContainerID - } - - // 4. Then check if container owner signed this token. - if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) { - // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again - return errBearerNotSignedByOwner - } - - // 5. Then check if request sender has rights to use this token. - var keySender frostfsecdsa.PublicKey - - err := keySender.Decode(reqInfo.SenderKey()) - if err != nil { - return fmt.Errorf("decode sender public key: %w", err) - } - - var usrSender user.ID - user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender)) - - if !token.AssertUser(usrSender) { - // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again - return errBearerInvalidOwner - } - - return nil -} - -func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { - if key == nil { - return false - } - - var id2 user.ID - user.IDFromKey(&id2, (ecdsa.PublicKey)(*key)) - - return id.Equals(id2) -} - -func unmarshalPublicKey(bs []byte) *keys.PublicKey { - pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256()) - if err != nil { - return nil - } - return pub -} diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go deleted file mode 100644 index d63cb1285..000000000 --- a/pkg/services/object/acl/acl_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package acl - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "github.com/stretchr/testify/require" -) - -type emptyEACLSource struct{} - -func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) { - return nil, nil -} - -type emptyNetmapState struct{} - -func (e emptyNetmapState) CurrentEpoch() uint64 { - return 0 -} - -func TestStickyCheck(t *testing.T) { - checker := NewChecker( - emptyNetmapState{}, - emptyEACLSource{}, - eaclSDK.NewValidator(), - &engine.StorageEngine{}) - - t.Run("system role", func(t *testing.T) { - var info v2.RequestInfo - - info.SetSenderKey(make([]byte, 33)) // any non-empty key - info.SetRequestRole(acl.RoleContainer) - - require.True(t, checker.StickyBitCheck(info, usertest.ID())) - - var basicACL acl.Basic - basicACL.MakeSticky() - - info.SetBasicACL(basicACL) - - require.True(t, checker.StickyBitCheck(info, usertest.ID())) - }) - - t.Run("owner ID and/or public key emptiness", func(t *testing.T) { - var info v2.RequestInfo - - info.SetRequestRole(acl.RoleOthers) // should be non-system role - - assertFn := func(isSticky, withKey, withOwner, expected bool) { - info := info - if 
isSticky { - var basicACL acl.Basic - basicACL.MakeSticky() - - info.SetBasicACL(basicACL) - } - - if withKey { - info.SetSenderKey(make([]byte, 33)) - } else { - info.SetSenderKey(nil) - } - - var ownerID user.ID - - if withOwner { - ownerID = usertest.ID() - } - - require.Equal(t, expected, checker.StickyBitCheck(info, ownerID)) - } - - assertFn(true, false, false, false) - assertFn(true, true, false, false) - assertFn(true, false, true, false) - assertFn(false, false, false, true) - assertFn(false, true, false, true) - assertFn(false, false, true, true) - assertFn(false, true, true, true) - }) -} diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go deleted file mode 100644 index 023b99239..000000000 --- a/pkg/services/object/acl/eacl/v2/eacl_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "context" - "crypto/ecdsa" - "errors" - "testing" - - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type testLocalStorage struct { - t *testing.T - - expAddr oid.Address - - obj *objectSDK.Object - - err error -} - -func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - require.True(s.t, addr.Container().Equals(s.expAddr.Container())) - require.True(s.t, addr.Object().Equals(s.expAddr.Object())) - - return s.obj, s.err -} - -func testXHeaders(strs ...string) []session.XHeader { - res := make([]session.XHeader, len(strs)/2) - - for i := 0; i < len(strs); i += 2 { - res[i/2].SetKey(strs[i]) - res[i/2].SetValue(strs[i+1]) - } - - return res -} - -func TestHeadRequest(t *testing.T) { - req := new(objectV2.HeadRequest) - - meta := new(session.RequestMetaHeader) - req.SetMetaHeader(meta) - - body := new(objectV2.HeadRequestBody) - req.SetBody(body) - - addr := oidtest.Address() - - var addrV2 refs.Address - addr.WriteToV2(&addrV2) - - body.SetAddress(&addrV2) - - xKey := "x-key" - xVal := "x-val" - xHdrs := testXHeaders( - xKey, xVal, - ) - - meta.SetXHeaders(xHdrs) - - obj := objectSDK.New() - - attrKey := "attr_key" - attrVal := "attr_val" - var attr objectSDK.Attribute - attr.SetKey(attrKey) - attr.SetValue(attrVal) - obj.SetAttributes(attr) - - table := new(eaclSDK.Table) - - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - senderKey := priv.PublicKey() - - r := eaclSDK.NewRecord() - r.SetOperation(eaclSDK.OperationHead) - r.SetAction(eaclSDK.ActionDeny) - r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal) - r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal) - eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) - - table.AddRecord(r) - - lStorage := &testLocalStorage{ - t: t, - expAddr: addr, - obj: obj, - } - - id := addr.Object() - - newSource := func(t *testing.T) eaclSDK.TypedHeaderSource { - hdrSrc, err := NewMessageHeaderSource( - lStorage, - NewRequestXHeaderSource(req), - addr.Container(), - WithOID(&id)) - require.NoError(t, err) - return hdrSrc - } - - cnr := addr.Container() - - unit := 
new(eaclSDK.ValidationUnit). - WithContainerID(&cnr). - WithOperation(eaclSDK.OperationHead). - WithSenderKey(senderKey.Bytes()). - WithEACLTable(table) - - validator := eaclSDK.NewValidator() - - checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(nil) - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(xHdrs) - - obj.SetAttributes() - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - lStorage.err = errors.New("any error") - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - r.SetAction(eaclSDK.ActionAllow) - - rID := eaclSDK.NewRecord() - rID.SetOperation(eaclSDK.OperationHead) - rID.SetAction(eaclSDK.ActionDeny) - rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object()) - eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) - - table = eaclSDK.NewTable() - table.AddRecord(r) - table.AddRecord(rID) - - unit.WithEACLTable(table) - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) -} - -func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.True(t, fromRule) - require.Equal(t, expected, actual) -} - -func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.False(t, fromRule) - require.Equal(t, eaclSDK.ActionAllow, actual) -} diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go deleted file mode 100644 index 34975e1e6..000000000 --- a/pkg/services/object/acl/eacl/v2/headers.go +++ /dev/null @@ -1,246 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type Option func(*cfg) - -type cfg struct { - storage ObjectStorage - - msg XHeaderSource - - cnr cid.ID - obj *oid.ID -} - -type ObjectStorage interface { - Head(context.Context, oid.Address) (*objectSDK.Object, error) -} - -type Request interface { - GetMetaHeader() *session.RequestMetaHeader -} - -type Response interface { - GetMetaHeader() *session.ResponseMetaHeader -} - -type headerSource struct { - requestHeaders []eaclSDK.Header - objectHeaders []eaclSDK.Header - - incompleteObjectHeaders bool -} - -func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) { - cfg := &cfg{ - storage: os, - cnr: cnrID, - msg: xhs, - } - - for i := range opts { - opts[i](cfg) - } - - if cfg.msg == nil { - return nil, errors.New("message is not provided") - } - - var res headerSource - - err := cfg.readObjectHeaders(&res) - if err != nil { - return nil, err - } - - res.requestHeaders = cfg.msg.GetXHeaders() - - return res, nil -} - -func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) { - switch typ { - default: - return nil, true - case 
eaclSDK.HeaderFromRequest: - return h.requestHeaders, true - case eaclSDK.HeaderFromObject: - return h.objectHeaders, !h.incompleteObjectHeaders - } -} - -type xHeader session.XHeader - -func (x xHeader) Key() string { - return (*session.XHeader)(&x).GetKey() -} - -func (x xHeader) Value() string { - return (*session.XHeader)(&x).GetValue() -} - -var errMissingOID = errors.New("object ID is missing") - -func (h *cfg) readObjectHeaders(dst *headerSource) error { - switch m := h.msg.(type) { - default: - panic(fmt.Sprintf("unexpected message type %T", h.msg)) - case requestXHeaderSource: - return h.readObjectHeadersFromRequestXHeaderSource(m, dst) - case responseXHeaderSource: - return h.readObjectHeadersResponseXHeaderSource(m, dst) - } -} - -func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error { - switch req := m.req.(type) { - case - *objectV2.GetRequest, - *objectV2.HeadRequest: - if h.obj == nil { - return errMissingOID - } - - objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objHeaders - dst.incompleteObjectHeaders = !completed - case - *objectV2.GetRangeRequest, - *objectV2.GetRangeHashRequest, - *objectV2.DeleteRequest: - if h.obj == nil { - return errMissingOID - } - - dst.objectHeaders = addressHeaders(h.cnr, h.obj) - case *objectV2.PutRequest: - if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.PutSingleRequest: - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj) - case *objectV2.SearchRequest: - cnrV2 := req.GetBody().GetContainerID() - var cnr cid.ID - - if cnrV2 != nil { - if err := cnr.ReadFromV2(*cnrV2); err != nil { - return fmt.Errorf("can't parse container ID: %w", err) - } - } - - dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)} - } - return nil -} - -func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error { - switch resp := m.resp.(type) { - default: - objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objectHeaders - dst.incompleteObjectHeaders = !completed - case *objectV2.GetResponse: - if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.HeadResponse: - oV2 := new(objectV2.Object) - - var hdr *objectV2.Header - - switch v := resp.GetBody().GetHeaderPart().(type) { - case *objectV2.ShortHeader: - hdr = new(objectV2.Header) - - var idV2 refsV2.ContainerID - h.cnr.WriteToV2(&idV2) - - hdr.SetContainerID(&idV2) - hdr.SetVersion(v.GetVersion()) - hdr.SetCreationEpoch(v.GetCreationEpoch()) - hdr.SetOwnerID(v.GetOwnerID()) - hdr.SetObjectType(v.GetObjectType()) - hdr.SetPayloadLength(v.GetPayloadLength()) - case *objectV2.HeaderWithSignature: - hdr = v.GetHeader() - } - - oV2.SetHeader(hdr) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - return nil -} - -func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) { - if idObj != nil { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(*idObj) - - obj, err := 
h.storage.Head(context.TODO(), addr) - if err == nil { - return headersFromObject(obj, cnr, idObj), true - } - } - - return addressHeaders(cnr, idObj), false -} - -func cidHeader(idCnr cid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectContainerID, - v: idCnr.EncodeToString(), - } -} - -func oidHeader(obj oid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectID, - v: obj.EncodeToString(), - } -} - -func ownerIDHeader(ownerID user.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectOwnerID, - v: ownerID.EncodeToString(), - } -} - -func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - hh := make([]eaclSDK.Header, 0, 2) - hh = append(hh, cidHeader(cnr)) - - if oid != nil { - hh = append(hh, oidHeader(*oid)) - } - - return hh -} diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go deleted file mode 100644 index 72bd4c2d2..000000000 --- a/pkg/services/object/acl/eacl/v2/object.go +++ /dev/null @@ -1,92 +0,0 @@ -package v2 - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type sysObjHdr struct { - k, v string -} - -func (s sysObjHdr) Key() string { - return s.k -} - -func (s sysObjHdr) Value() string { - return s.v -} - -func u64Value(v uint64) string { - return strconv.FormatUint(v, 10) -} - -func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - var count int - for obj := obj; obj != nil; obj = obj.Parent() { - count += 9 + len(obj.Attributes()) - } - - res := make([]eaclSDK.Header, 0, count) - for ; obj != nil; obj = obj.Parent() { - res = append(res, - cidHeader(cnr), - // creation epoch - sysObjHdr{ - k: acl.FilterObjectCreationEpoch, - v: u64Value(obj.CreationEpoch()), - }, - // payload size - sysObjHdr{ - k: acl.FilterObjectPayloadLength, - v: u64Value(obj.PayloadSize()), - }, - // object version - sysObjHdr{ - k: acl.FilterObjectVersion, - v: obj.Version().String(), - }, - // object type - sysObjHdr{ - k: acl.FilterObjectType, - v: obj.Type().String(), - }, - ) - - if oid != nil { - res = append(res, oidHeader(*oid)) - } - - if idOwner := obj.OwnerID(); !idOwner.IsEmpty() { - res = append(res, ownerIDHeader(idOwner)) - } - - cs, ok := obj.PayloadChecksum() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectPayloadHash, - v: cs.String(), - }) - } - - cs, ok = obj.PayloadHomomorphicHash() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectHomomorphicHash, - v: cs.String(), - }) - } - - attrs := obj.Attributes() - for i := range attrs { - res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface - } - } - - return res -} diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go deleted file mode 100644 index d91a21c75..000000000 --- a/pkg/services/object/acl/eacl/v2/opts.go +++ /dev/null @@ -1,11 +0,0 @@ -package v2 - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func WithOID(v *oid.ID) Option { - return func(c *cfg) { - c.obj = v - } -} diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go deleted file mode 100644 index c1fdea9d8..000000000 --- a/pkg/services/object/acl/eacl/v2/xheader.go +++ /dev/null @@ -1,69 +0,0 
@@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" -) - -type XHeaderSource interface { - GetXHeaders() []eaclSDK.Header -} - -type requestXHeaderSource struct { - req Request -} - -func NewRequestXHeaderSource(req Request) XHeaderSource { - return requestXHeaderSource{req: req} -} - -type responseXHeaderSource struct { - resp Response - - req Request -} - -func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource { - return responseXHeaderSource{resp: resp, req: req} -} - -func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - ln += len(meta.GetXHeaders()) - } - - res := make([]eaclSDK.Header, 0, ln) - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - for i := range x { - res = append(res, (xHeader)(x[i])) - } - } - - return res -} - -func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - xHdrs := make([][]session.XHeader, 0) - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - - ln += len(x) - - xHdrs = append(xHdrs, x) - } - - res := make([]eaclSDK.Header, 0, ln) - - for i := range xHdrs { - for j := range xHdrs[i] { - res = append(res, xHeader(xHdrs[i][j])) - } - } - - return res -} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go deleted file mode 100644 index 11b9e6e5f..000000000 --- a/pkg/services/object/acl/v2/errors.go +++ /dev/null @@ -1,41 +0,0 @@ -package v2 - -import ( - "fmt" - - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -const invalidRequestMessage = "malformed request" - -func malformedRequestError(reason string) error { - return fmt.Errorf("%s: %s", invalidRequestMessage, reason) -} - -var ( - errEmptyBody = malformedRequestError("empty body") - errEmptyVerificationHeader = malformedRequestError("empty verification header") - errEmptyBodySig = malformedRequestError("empty at body signature") - errInvalidSessionSig = malformedRequestError("invalid session token signature") - errInvalidSessionOwner = malformedRequestError("invalid session token owner") - errInvalidVerb = malformedRequestError("session token verb is invalid") -) - -const ( - accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check" - accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v" -) - -func basicACLErr(info RequestInfo) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation)) - - return errAccessDenied -} - -func eACLErr(info RequestInfo, err error) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err)) - - return errAccessDenied -} diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go deleted file mode 100644 index 2d2b7bc8d..000000000 --- a/pkg/services/object/acl/v2/errors_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package v2 - -import ( - "errors" - "testing" - - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "github.com/stretchr/testify/require" -) - -func TestBasicACLErr(t *testing.T) { - var reqInfo RequestInfo - err := basicACLErr(reqInfo) - - var errAccessDenied 
*apistatus.ObjectAccessDenied - - require.ErrorAs(t, err, &errAccessDenied, - "basicACLErr must be able to be casted to apistatus.ObjectAccessDenied") -} - -func TestEACLErr(t *testing.T) { - var reqInfo RequestInfo - testErr := errors.New("test-eacl") - err := eACLErr(reqInfo, testErr) - - var errAccessDenied *apistatus.ObjectAccessDenied - - require.ErrorAs(t, err, &errAccessDenied, - "eACLErr must be able to be casted to apistatus.ObjectAccessDenied") -} diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go deleted file mode 100644 index 15fcce884..000000000 --- a/pkg/services/object/acl/v2/opts.go +++ /dev/null @@ -1,12 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// WithLogger returns option to set logger. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.log = v - } -} diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go deleted file mode 100644 index 74279e453..000000000 --- a/pkg/services/object/acl/v2/request.go +++ /dev/null @@ -1,159 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "fmt" - - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -// RequestInfo groups parsed version-independent (from SDK library) -// request information and raw API request. -type RequestInfo struct { - basicACL acl.Basic - requestRole acl.Role - operation acl.Op // put, get, head, etc. - cnrOwner user.ID // container owner - - // cnrNamespace defined to which namespace a container is belonged. - cnrNamespace string - - idCnr cid.ID - - // optional for some request - // e.g. Put, Search - obj *oid.ID - - senderKey []byte - - bearer *bearer.Token // bearer token of request - - srcRequest any -} - -func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) { - r.basicACL = basicACL -} - -func (r *RequestInfo) SetRequestRole(requestRole acl.Role) { - r.requestRole = requestRole -} - -func (r *RequestInfo) SetSenderKey(senderKey []byte) { - r.senderKey = senderKey -} - -// Request returns raw API request. -func (r RequestInfo) Request() any { - return r.srcRequest -} - -// ContainerOwner returns owner if the container. -func (r RequestInfo) ContainerOwner() user.ID { - return r.cnrOwner -} - -func (r RequestInfo) ContainerNamespace() string { - return r.cnrNamespace -} - -// ObjectID return object ID. -func (r RequestInfo) ObjectID() *oid.ID { - return r.obj -} - -// ContainerID return container ID. -func (r RequestInfo) ContainerID() cid.ID { - return r.idCnr -} - -// CleanBearer forces cleaning bearer token information. -func (r *RequestInfo) CleanBearer() { - r.bearer = nil -} - -// Bearer returns bearer token of the request. -func (r RequestInfo) Bearer() *bearer.Token { - return r.bearer -} - -// BasicACL returns basic ACL of the container. -func (r RequestInfo) BasicACL() acl.Basic { - return r.basicACL -} - -// SenderKey returns public key of the request's sender. -func (r RequestInfo) SenderKey() []byte { - return r.senderKey -} - -// Operation returns request's operation. 
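// --- Illustrative sketch (editor's addition, not part of the diff) ---
// The deleted errors.go packed a human-readable reason into a typed
// access-denied status error, and the tests above assert that the result can
// still be matched with errors.As. The same pattern, self-contained, with a
// hypothetical AccessDenied type standing in for apistatus.ObjectAccessDenied:
package sketch

import (
	"errors"
	"fmt"
)

type AccessDenied struct{ reason string }

func (e *AccessDenied) Error() string { return "access denied: " + e.reason }

func basicACLErr(op string) error {
	return &AccessDenied{reason: fmt.Sprintf("access to operation %s is denied by basic ACL check", op)}
}

// isAccessDenied shows the errors.As matching used by the deleted tests.
func isAccessDenied(err error) bool {
	var denied *AccessDenied
	return errors.As(err, &denied)
}
// --- end sketch ---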
-func (r RequestInfo) Operation() acl.Op { - return r.operation -} - -// RequestRole returns request sender's role. -func (r RequestInfo) RequestRole() acl.Role { - return r.requestRole -} - -// IsSoftAPECheck states if APE should perform soft checks. -// Soft APE check allows a request if CheckAPE returns NoRuleFound for it, -// otherwise it denies the request. -func (r RequestInfo) IsSoftAPECheck() bool { - return r.BasicACL().Bits() != 0 -} - -// MetaWithToken groups session and bearer tokens, -// verification header and raw API request. -type MetaWithToken struct { - vheader *sessionV2.RequestVerificationHeader - token *sessionSDK.Object - bearer *bearer.Token - src any -} - -// RequestOwner returns ownerID and its public key -// according to internal meta information. -func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { - if r.vheader == nil { - return nil, nil, errEmptyVerificationHeader - } - - if r.bearer != nil && r.bearer.Impersonate() { - return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes()) - } - - // if session token is presented, use it as truth source - if r.token != nil { - // verify signature of session token - return ownerFromToken(r.token) - } - - // otherwise get original body signature - bodySignature := originalBodySignature(r.vheader) - if bodySignature == nil { - return nil, nil, errEmptyBodySig - } - - return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) -} - -func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { - key, err := unmarshalPublicKey(rawKey) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var idSender user.ID - user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) - - return &idSender, key, nil -} diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go deleted file mode 100644 index 3e128836f..000000000 --- a/pkg/services/object/acl/v2/service.go +++ /dev/null @@ -1,792 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - "strings" - - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "go.uber.org/zap" -) - -// Service checks basic ACL rules. 
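// --- Illustrative sketch (editor's addition, not part of the diff) ---
// MetaWithToken.RequestOwner (deleted above) resolves the request sender with
// a fixed precedence: an impersonating bearer token wins, then a session
// token, then the key that signed the request body. A compact model of that
// decision ladder, with hypothetical types:
package sketch

import "errors"

type token struct {
	signingKey  []byte
	impersonate bool
}

var (
	errNoVerificationHeader = errors.New("empty verification header")
	errNoBodySignature      = errors.New("empty body signature")
)

func requestOwnerKey(bearer, session *token, bodySigKey []byte, haveVHeader bool) ([]byte, error) {
	if !haveVHeader {
		return nil, errNoVerificationHeader
	}
	if bearer != nil && bearer.impersonate {
		return bearer.signingKey, nil // impersonation: trust the bearer signer
	}
	if session != nil {
		return session.signingKey, nil // session token is the source of truth
	}
	if len(bodySigKey) == 0 {
		return nil, errNoBodySignature
	}
	return bodySigKey, nil // fall back to the original body signature
}
// --- end sketch ---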
-type Service struct { - *cfg - - c objectCore.SenderClassifier -} - -type putStreamBasicChecker struct { - source *Service - next object.PutObjectStream -} - -type getStreamBasicChecker struct { - checker ACLChecker - - object.GetObjectStream - - info RequestInfo -} - -type rangeStreamBasicChecker struct { - checker ACLChecker - - object.GetObjectRangeStream - - info RequestInfo -} - -type searchStreamBasicChecker struct { - checker ACLChecker - - object.SearchStream - - info RequestInfo -} - -// Option represents Service constructor option. -type Option func(*cfg) - -type cfg struct { - log *logger.Logger - - containers container.Source - - checker ACLChecker - - irFetcher InnerRingFetcher - - nm netmap.Source - - next object.ServiceServer -} - -// New is a constructor for object ACL checking service. -func New(next object.ServiceServer, - nm netmap.Source, - irf InnerRingFetcher, - acl ACLChecker, - cs container.Source, - opts ...Option, -) Service { - cfg := &cfg{ - log: &logger.Logger{Logger: zap.L()}, - next: next, - nm: nm, - irFetcher: irf, - checker: acl, - containers: cs, - } - - for i := range opts { - opts[i](cfg) - } - - return Service{ - cfg: cfg, - c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log), - } -} - -// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedGetObjectStream struct { - object.GetObjectStream - - requestInfo RequestInfo -} - -func (w *wrappedGetObjectStream) Context() context.Context { - return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - SoftAPECheck: w.requestInfo.IsSoftAPECheck(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream { - return &wrappedGetObjectStream{ - GetObjectStream: getObjectStream, - requestInfo: reqInfo, - } -} - -// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedRangeStream struct { - object.GetObjectRangeStream - - requestInfo RequestInfo -} - -func (w *wrappedRangeStream) Context() context.Context { - return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - SoftAPECheck: w.requestInfo.IsSoftAPECheck(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream { - return &wrappedRangeStream{ - GetObjectRangeStream: rangeStream, - requestInfo: reqInfo, - } -} - -// wrappedSearchStream propagates RequestContext into SearchStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. 
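// --- Illustrative sketch (editor's addition, not part of the diff) ---
// Every wrapped*Stream above follows one pattern: compute RequestInfo once,
// then carry it in the stream's context under a package-level key so the next
// handler can read it back instead of re-deriving it. Minimal version:
package sketch

import "context"

type ctxKey struct{}

// RequestContext is a hypothetical stand-in for object.RequestContext.
type RequestContext struct {
	Namespace string
	SenderKey []byte
}

// WithRequestContext returns a child context carrying precomputed request info.
func WithRequestContext(ctx context.Context, rc *RequestContext) context.Context {
	return context.WithValue(ctx, ctxKey{}, rc)
}

// RequestContextFrom retrieves it; ok is false when nothing was stored.
func RequestContextFrom(ctx context.Context) (rc *RequestContext, ok bool) {
	rc, ok = ctx.Value(ctxKey{}).(*RequestContext)
	return rc, ok
}
// --- end sketch ---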
-type wrappedSearchStream struct { - object.SearchStream - - requestInfo RequestInfo -} - -func (w *wrappedSearchStream) Context() context.Context { - return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - SoftAPECheck: w.requestInfo.IsSoftAPECheck(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream { - return &wrappedSearchStream{ - SearchStream: searchStream, - requestInfo: reqInfo, - } -} - -// Get implements ServiceServer interface, makes ACL checks and calls -// next Get method in the ServiceServer pipeline. -func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet) - if err != nil { - return err - } - - reqInfo.obj = obj - - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return eACLErr(reqInfo, err) - } - } - - return b.next.Get(request, &getStreamBasicChecker{ - GetObjectStream: newWrappedGetObjectStreamStream(stream, reqInfo), - info: reqInfo, - checker: b.checker, - }) -} - -func (b Service) Put() (object.PutObjectStream, error) { - streamer, err := b.next.Put() - - return putStreamBasicChecker{ - source: &b, - next: streamer, - }, err -} - -func (b Service) Head( - ctx context.Context, - request *objectV2.HeadRequest, -) (*objectV2.HeadResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return nil, basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - - resp, err := b.next.Head(requestContext(ctx, reqInfo), request) - if err == nil { - if err = b.checker.CheckEACL(resp, reqInfo); err != nil { - err = eACLErr(reqInfo, err) - } - } - - return resp, err -} - -func (b Service) 
Search(request *objectV2.SearchRequest, stream object.SearchStream) error { - id, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, id, nil) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch) - if err != nil { - return err - } - - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return eACLErr(reqInfo, err) - } - } - - return b.next.Search(request, &searchStreamBasicChecker{ - checker: b.checker, - SearchStream: newWrappedSearchStream(stream, reqInfo), - info: reqInfo, - }) -} - -func (b Service) Delete( - ctx context.Context, - request *objectV2.DeleteRequest, -) (*objectV2.DeleteResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return nil, basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - - return b.next.Delete(requestContext(ctx, reqInfo), request) -} - -func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange) - if err != nil { - return err - } - - reqInfo.obj = obj - - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return eACLErr(reqInfo, err) - } - } - - return b.next.GetRange(request, &rangeStreamBasicChecker{ - checker: b.checker, - GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo), - info: reqInfo, - }) -} - -func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { - return 
context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{ - Namespace: reqInfo.ContainerNamespace(), - ContainerOwner: reqInfo.ContainerOwner(), - SenderKey: reqInfo.SenderKey(), - Role: reqInfo.RequestRole(), - SoftAPECheck: reqInfo.IsSoftAPECheck(), - BearerToken: reqInfo.Bearer(), - }) -} - -func (b Service) GetRangeHash( - ctx context.Context, - request *objectV2.GetRangeHashRequest, -) (*objectV2.GetRangeHashResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return nil, basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - - return b.next.GetRangeHash(requestContext(ctx, reqInfo), request) -} - -func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID() - if idV2 == nil { - return nil, errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid object owner: %w", err) - } - - obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID()) - if err != nil { - return nil, err - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return nil, err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) || !b.checker.StickyBitCheck(reqInfo, idOwner) { - return nil, basicACLErr(reqInfo) - } - if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - - return b.next.PutSingle(requestContext(ctx, reqInfo), request) -} - -func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { - body := request.GetBody() - if body == nil { - return errEmptyBody - } - - part := body.GetObjectPart() - if part, ok := part.(*objectV2.PutObjectPartInit); ok { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - idV2 := part.GetHeader().GetOwnerID() - if idV2 == nil { - return errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return fmt.Errorf("invalid object owner: %w", 
err) - } - - objV2 := part.GetObjectID() - var obj *oid.ID - - if objV2 != nil { - obj = new(oid.ID) - - err = obj.ReadFromV2(*objV2) - if err != nil { - return err - } - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut) - if err != nil { - return err - } - - reqInfo.obj = obj - - if reqInfo.IsSoftAPECheck() { - if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) { - return basicACLErr(reqInfo) - } - } - - ctx = requestContext(ctx, reqInfo) - } - - return p.next.Send(ctx, request) -} - -func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { - var sTok *sessionSDK.Object - - if tokV2 != nil { - sTok = new(sessionSDK.Object) - - err := sTok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { - // if session relates to object's removal, we don't check - // relation of the tombstone to the session here since user - // can't predict tomb's ID. - err = assertSessionRelation(*sTok, cnr, nil) - } else { - err = assertSessionRelation(*sTok, cnr, obj) - } - - if err != nil { - return nil, err - } - } - - return sTok, nil -} - -func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { - if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { - if err := g.checker.CheckEACL(resp, g.info); err != nil { - return eACLErr(g.info, err) - } - } - - return g.GetObjectStream.Send(resp) -} - -func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error { - if err := g.checker.CheckEACL(resp, g.info); err != nil { - return eACLErr(g.info, err) - } - - return g.GetObjectRangeStream.Send(resp) -} - -func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error { - if err := g.checker.CheckEACL(resp, g.info); err != nil { - return eACLErr(g.info, err) - } - - return g.SearchStream.Send(resp) -} - -func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { - cnr, err := b.containers.Get(idCnr) // fetch actual container - if err != nil { - return info, err - } - - if req.token != nil { - currentEpoch, err := b.nm.Epoch() - if err != nil { - return info, errors.New("can't fetch current epoch") - } - if req.token.ExpiredAt(currentEpoch) { - return info, new(apistatus.SessionTokenExpired) - } - if req.token.InvalidAt(currentEpoch) { - return info, fmt.Errorf("%s: token is invalid at %d epoch)", - invalidRequestMessage, currentEpoch) - } - - if !assertVerb(*req.token, op) { - return info, errInvalidVerb - } - } - - // find request role and key - ownerID, ownerKey, err := req.RequestOwner() - if err != nil { - return info, err - } - res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value) - if err != nil { - return info, err - } - - info.basicACL = cnr.Value.BasicACL() - info.requestRole = res.Role - info.operation = op - info.cnrOwner = cnr.Value.Owner() - info.idCnr = idCnr - - 
cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - info.cnrNamespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - info.senderKey = res.Key - - // add bearer token if it is present in request - info.bearer = req.bearer - - info.srcRequest = req.src - - return info, nil -} diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go deleted file mode 100644 index 061cd26b6..000000000 --- a/pkg/services/object/acl/v2/types.go +++ /dev/null @@ -1,28 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// ACLChecker is an interface that must provide -// ACL related checks. -type ACLChecker interface { - // CheckBasicACL must return true only if request - // passes basic ACL validation. - CheckBasicACL(RequestInfo) bool - // CheckEACL must return non-nil error if request - // doesn't pass extended ACL validation. - CheckEACL(any, RequestInfo) error - // StickyBitCheck must return true only if sticky bit - // is disabled or enabled but request contains correct - // owner field. - StickyBitCheck(RequestInfo, user.ID) bool -} - -// InnerRingFetcher is an interface that must provide -// Inner Ring information. -type InnerRingFetcher interface { - // InnerRingKeys must return list of public keys of - // the actual inner ring. - InnerRingKeys() ([][]byte, error) -} diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go deleted file mode 100644 index 394feef4e..000000000 --- a/pkg/services/object/acl/v2/util_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" - aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" - "github.com/stretchr/testify/require" -) - -func TestOriginalTokens(t *testing.T) { - sToken := sessiontest.ObjectSigned() - bToken := bearertest.Token() - - pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, bToken.Sign(*pk)) - - var bTokenV2 acl.BearerToken - bToken.WriteToV2(&bTokenV2) - // This line is needed because SDK uses some custom format for - // reserved filters, so `cid.ID` is not converted to string immediately. 
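// --- Illustrative sketch (editor's addition, not part of the diff) ---
// findRequestInfo above derives the container namespace by trimming a ".ns"
// zone suffix with strings.CutSuffix (Go 1.20+), which also reports whether
// the suffix was present; the namespace is only set when it was. For example:
package main

import (
	"fmt"
	"strings"
)

func namespaceFromZone(zone string) (string, bool) {
	// "tenant1.ns" -> ("tenant1", true); "cnr" -> ("cnr", false): the input
	// comes back unchanged when the suffix is absent, so check the flag.
	return strings.CutSuffix(zone, ".ns")
}

func main() {
	if ns, ok := namespaceFromZone("tenant1.ns"); ok {
		fmt.Println("namespace:", ns) // namespace: tenant1
	}
}
// --- end sketch ---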
- require.NoError(t, bToken.ReadFromV2(bTokenV2)) - - var sTokenV2 session.Token - sToken.WriteToV2(&sTokenV2) - - for i := 0; i < 10; i++ { - metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) - res, err := originalSessionToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, sToken, res, i) - - bTok, err := originalBearerToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, &bToken, bTok, i) - } -} - -func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader { - metaHeader := new(session.RequestMetaHeader) - metaHeader.SetBearerToken(b) - metaHeader.SetSessionToken(s) - - for i := uint32(0); i < depth; i++ { - link := metaHeader - metaHeader = new(session.RequestMetaHeader) - metaHeader.SetOrigin(link) - } - - return metaHeader -} - -func TestIsVerbCompatible(t *testing.T) { - // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28 - table := map[aclsdk.Op][]sessionSDK.ObjectVerb{ - aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet}, - aclsdk.OpObjectHead: { - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - }, - aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, - } - - verbs := []sessionSDK.ObjectVerb{ - sessionSDK.VerbObjectPut, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectSearch, - } - - var tok sessionSDK.Object - - for op, list := range table { - for _, verb := range verbs { - var contains bool - for _, v := range list { - if v == verb { - contains = true - break - } - } - - tok.ForVerb(verb) - - require.Equal(t, contains, assertVerb(tok, op), - "%v in token, %s executing", verb, op) - } - } -} - -func TestAssertSessionRelation(t *testing.T) { - var tok sessionSDK.Object - cnr := cidtest.ID() - cnrOther := cidtest.ID() - obj := oidtest.ID() - objOther := oidtest.ID() - - // make sure ids differ, otherwise test won't work correctly - require.False(t, cnrOther.Equals(cnr)) - require.False(t, objOther.Equals(obj)) - - // bind session to the container (required) - tok.BindContainer(cnr) - - // test container-global session - require.NoError(t, assertSessionRelation(tok, cnr, nil)) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnrOther, nil)) - require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) - - // limit the session to the particular object - tok.LimitByObjects(obj) - - // test fixed object session (here obj arg must be non-nil everywhere) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnr, &objOther)) -} diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index a1972292e..bb6067a37 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -2,49 +2,41 @@ package ape import ( "context" - "crypto/ecdsa" "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - aperequest 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) type checkerImpl struct { - localOverrideStorage policyengine.LocalOverrideStorage - morphChainStorage policyengine.MorphRuleChainStorageReader - headerProvider HeaderProvider - frostFSIDClient frostfsidcore.SubjectProvider - nm netmap.Source - st netmap.State - cnrSource container.Source - nodePK []byte + checkerCore checkercore.CheckCore + frostFSIDClient frostfsidcore.SubjectProvider + headerProvider HeaderProvider + nm netmap.Source + cnrSource container.Source + nodePK []byte } func NewChecker(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, headerProvider HeaderProvider, frostFSIDClient frostfsidcore.SubjectProvider, nm netmap.Source, st netmap.State, cnrSource container.Source, nodePK []byte) Checker { return &checkerImpl{ - localOverrideStorage: localOverrideStorage, - morphChainStorage: morphChainStorage, - headerProvider: headerProvider, - frostFSIDClient: frostFSIDClient, - nm: nm, - st: st, - cnrSource: cnrSource, - nodePK: nodePK, + checkerCore: checkercore.New(localOverrideStorage, morphChainStorage, frostFSIDClient, st), + frostFSIDClient: frostFSIDClient, + headerProvider: headerProvider, + nm: nm, + cnrSource: cnrSource, + nodePK: nodePK, } } @@ -72,11 +64,8 @@ type Prm struct { // An encoded container's owner user ID. ContainerOwner user.ID - // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow. - SoftAPECheck bool - - // If true, object headers will not retrieved from storage engine. - WithoutHeaderRequest bool + // Attributes defined for the container. + ContainerAttributes map[string]string // The request's bearer token. It is used in order to check APE overrides with the token. 
BearerToken *bearer.Token @@ -85,73 +74,15 @@ type Prm struct { XHeaders []session.XHeader } -var ( - errMissingOID = errors.New("object ID is not set") - errInvalidTargetType = errors.New("bearer token defines non-container target override") - errBearerExpired = errors.New("bearer token has expired") - errBearerInvalidSignature = errors.New("bearer token has invalid signature") - errBearerInvalidContainerID = errors.New("bearer token was created for another container") - errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner") - errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender") -) +var errMissingOID = errors.New("object ID is not set") -// isValidBearer checks whether bearer token was correctly signed by authorized -// entity. This method might be defined on whole ACL service because it will -// require fetching current epoch to check lifetime. -func isValidBearer(token *bearer.Token, ownerCnr user.ID, containerID cid.ID, publicKey *keys.PublicKey, st netmap.State) error { - if token == nil { - return nil - } - - // First check token lifetime. Simplest verification. - if token.InvalidAt(st.CurrentEpoch()) { - return errBearerExpired - } - - // Then check if bearer token is signed correctly. - if !token.VerifySignature() { - return errBearerInvalidSignature - } - - // Check for ape overrides defined in the bearer token. - apeOverride := token.APEOverride() - if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { - return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) - } - - // Then check if container is either empty or equal to the container in the request. - var targetCnr cid.ID - err := targetCnr.DecodeString(apeOverride.Target.Name) - if err != nil { - return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) - } - if !containerID.Equals(targetCnr) { - return errBearerInvalidContainerID - } - - // Then check if container owner signed this token. - if !bearer.ResolveIssuer(*token).Equals(ownerCnr) { - return errBearerNotSignedByOwner - } - - // Then check if request sender has rights to use this token. - var usrSender user.ID - user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) - - if !token.AssertUser(usrSender) { - return errBearerInvalidOwner - } - - return nil -} - -// CheckAPE checks if a request or a response is permitted creating an ape request and passing -// it to chain router. +// CheckAPE prepares an APE-request and checks if it is permitted by policies. func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { // APE check is ignored for some inter-node requests. - if prm.Role == nativeschema.PropertyValueContainerRoleContainer { + switch prm.Role { + case nativeschema.PropertyValueContainerRoleContainer: return nil - } else if prm.Role == nativeschema.PropertyValueContainerRoleIR { + case nativeschema.PropertyValueContainerRoleIR: switch prm.Method { case nativeschema.MethodGetObject, nativeschema.MethodHeadObject, @@ -171,38 +102,13 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { if err != nil { return err } - groups, err := aperequest.Groups(c.frostFSIDClient, pub) - if err != nil { - return fmt.Errorf("failed to get group ids: %w", err) - } - // Policy contract keeps group related chains as namespace-group pair. 
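The group-qualification step removed here is re-implemented inside checkercore.CheckCore; assuming unchanged semantics, a minimal sketch of what it does (namespace, cnrID, pub, groups as in the removed code):

```go
// Group chains are stored in the policy contract under "namespace:group"
// keys, so raw group IDs fetched from frostfsid are qualified first; the
// actor target is built the same way from the namespace and sender address.
for i := range groups {
	groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
}
rt := policyengine.NewRequestTargetExtended(
	namespace,
	cnrID.EncodeToString(),
	fmt.Sprintf("%s:%s", namespace, pub.Address()),
	groups,
)
_ = rt // fed into cr.IsAllowed(apechain.Ingress, rt, r) in the removed code
```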
- for i := range groups { - groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i]) - } - - var cr policyengine.ChainRouter - if prm.BearerToken != nil && !prm.BearerToken.Impersonate() { - if err := isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, pub, c.st); err != nil { - return fmt.Errorf("bearer token validation error: %w", err) - } - cr, err = router.BearerChainFeedRouter(c.localOverrideStorage, c.morphChainStorage, prm.BearerToken.APEOverride()) - if err != nil { - return fmt.Errorf("create chain router error: %w", err) - } - } else { - cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.morphChainStorage, c.localOverrideStorage) - } - - rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, pub.Address()), groups) - status, ruleFound, err := cr.IsAllowed(apechain.Ingress, rt, r) - if err != nil { - return err - } - - if !ruleFound && prm.SoftAPECheck || status == apechain.Allow { - return nil - } - - return fmt.Errorf("method %s: %s", prm.Method, status) + return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{ + Request: r, + PublicKey: pub, + Namespace: prm.Namespace, + Container: prm.Container, + ContainerOwner: prm.ContainerOwner, + BearerToken: prm.BearerToken, + }) } diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go index 090f6a83c..97eb2b2d7 100644 --- a/pkg/services/object/ape/checker_test.go +++ b/pkg/services/object/ape/checker_test.go @@ -8,13 +8,13 @@ import ( "fmt" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 { return pk.GetScriptHash() } -func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { +func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { v, ok := f.subjects[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e return v, nil } -func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { v, ok := f.subjectsExtended[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -619,21 +619,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid 
diff") } - return s.GetNetMapByEpoch(s.currentEpoch - diff) + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) } -func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch() (uint64, error) { +func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { return s.currentEpoch, nil } @@ -641,18 +641,18 @@ type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } -func TestPutECChunk(t *testing.T) { +func TestGetECChunk(t *testing.T) { headerProvider := newHeaderProviderMock() frostfsidProvider := newFrostfsIDProviderMock(t) @@ -666,11 +666,10 @@ func TestPutECChunk(t *testing.T) { Rules: []chain.Rule{ { Status: chain.AccessDenied, - Actions: chain.Actions{Names: methodsOptionalOID}, + Actions: chain.Actions{Names: methodsRequiredOID}, Resources: chain.Resources{ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, }, - Any: true, Condition: []chain.Condition{ { Op: chain.CondStringEquals, @@ -680,21 +679,32 @@ func TestPutECChunk(t *testing.T) { }, }, }, + { + Status: chain.Allow, + Actions: chain.Actions{Names: methodsRequiredOID}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + }, }, - MatchType: chain.MatchTypeFirstMatch, }) node1Key, err := keys.NewPrivateKey() require.NoError(t, err) node1 := netmapSDK.NodeInfo{} node1.SetPublicKey(node1Key.PublicKey().Bytes()) + node2Key, err := keys.NewPrivateKey() + require.NoError(t, err) + node2 := netmapSDK.NodeInfo{} + node2.SetPublicKey(node1Key.PublicKey().Bytes()) netmap := &netmapSDK.NetMap{} netmap.SetEpoch(100) - netmap.SetNodes([]netmapSDK.NodeInfo{node1}) + netmap.SetNodes([]netmapSDK.NodeInfo{node1, node2}) nm := &netmapStub{ currentEpoch: 100, netmaps: map[uint64]*netmapSDK.NetMap{ + 99: netmap, 100: netmap, }, } @@ -702,7 +712,7 @@ func TestPutECChunk(t *testing.T) { cont := containerSDK.Container{} cont.Init() pp := netmapSDK.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) + require.NoError(t, pp.DecodeString("EC 1.1")) cont.SetPlacementPolicy(pp) cs := &testContainerSource{ containers: map[cid.ID]*container.Container{ @@ -718,7 +728,7 @@ func TestPutECChunk(t *testing.T) { chunkHeader := newHeaderObjectSDK(cnr, obj, nil).ToV2().GetHeader() ecHeader := object.ECHeader{ Index: 1, - Total: 5, + Total: 2, Parent: &refs.ObjectID{}, } chunkHeader.SetEC(&ecHeader) @@ -737,32 +747,33 @@ func TestPutECChunk(t *testing.T) { }) headerProvider.addHeader(cnr, ecParentID, parentHeader) - t.Run("access denied for container node", func(t *testing.T) { + // container node requests EC parent headers, so container node denies access by matching attribute key/value + t.Run("access denied on container node", func(t *testing.T) { prm := Prm{ - Method: 
nativeschema.MethodPutObject, - Container: cnr, - Object: obj, - Role: role, - SenderKey: senderKey, - Header: chunkHeader, - SoftAPECheck: true, + Method: nativeschema.MethodGetObject, + Container: cnr, + Object: obj, + Role: role, + SenderKey: hex.EncodeToString(node2Key.PublicKey().Bytes()), + Header: chunkHeader, } err = checker.CheckAPE(context.Background(), prm) require.Error(t, err) }) - t.Run("access allowed for non container node", func(t *testing.T) { + + // a non-container node has no access rights to collect the EC parent header, so it checks the EC chunk header instead + t.Run("access allowed on non container node", func(t *testing.T) { otherKey, err := keys.NewPrivateKey() require.NoError(t, err) checker = NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, otherKey.PublicKey().Bytes()) prm := Prm{ - Method: nativeschema.MethodPutObject, - Container: cnr, - Object: obj, - Role: nativeschema.PropertyValueContainerRoleOthers, - SenderKey: senderKey, - Header: chunkHeader, - SoftAPECheck: true, + Method: nativeschema.MethodGetObject, + Container: cnr, + Object: obj, + Role: nativeschema.PropertyValueContainerRoleOthers, + SenderKey: senderKey, + Header: chunkHeader, } err = checker.CheckAPE(context.Background(), prm) diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go index 1b2024ed5..82e660a7f 100644 --- a/pkg/services/object/ape/errors.go +++ b/pkg/services/object/ape/errors.go @@ -1,10 +1,34 @@ package ape import ( + "errors" + + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) +var ( + errMissingContainerID = malformedRequestError("missing container ID") + errEmptyVerificationHeader = malformedRequestError("empty verification header") + errEmptyBodySig = malformedRequestError("empty body signature") + errInvalidSessionSig = malformedRequestError("invalid session token signature") + errInvalidSessionOwner = malformedRequestError("invalid session token owner") + errInvalidVerb = malformedRequestError("session token verb is invalid") +) + +func malformedRequestError(reason string) error { + invalidArgErr := &apistatus.InvalidArgument{} + invalidArgErr.SetMessage(reason) + return invalidArgErr +} + func toStatusErr(err error) error { + var chRouterErr *checkercore.ChainRouterError + if !errors.As(err, &chRouterErr) { + errServerInternal := &apistatus.ServerInternal{} + apistatus.WriteInternalServerErr(errServerInternal, err) + return errServerInternal + } errAccessDenied := &apistatus.ObjectAccessDenied{} errAccessDenied.WriteReason("ape denied request: " + err.Error()) return errAccessDenied diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go new file mode 100644 index 000000000..102985aa6 --- /dev/null +++ b/pkg/services/object/ape/metadata.go @@ -0,0 +1,179 @@ +package ape + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +type Metadata struct { + Container cid.ID + Object *oid.ID + MetaHeader *session.RequestMetaHeader + VerificationHeader *session.RequestVerificationHeader + SessionToken *sessionSDK.Object + BearerToken *bearer.Token +} + +func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) { + if m.VerificationHeader == nil { + return nil, nil, errEmptyVerificationHeader + } + + if m.BearerToken != nil && m.BearerToken.Impersonate() { + return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes()) + } + + // if session token is presented, use it as truth source + if m.SessionToken != nil { + // verify signature of session token + return ownerFromToken(m.SessionToken) + } + + // otherwise get original body signature + bodySignature := originalBodySignature(m.VerificationHeader) + if bodySignature == nil { + return nil, nil, errEmptyBodySig + } + + return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) +} + +// RequestInfo contains request information extracted by request metadata. +type RequestInfo struct { + // Role defines under which role this request is executed. + // It must be represented only as a constant represented in native schema. + Role string + + ContainerOwner user.ID + + ContainerAttributes map[string]string + + // Namespace defines to which namespace a container is belonged. + Namespace string + + // HEX-encoded sender key. + SenderKey string +} + +type RequestInfoExtractor interface { + GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error) +} + +type extractor struct { + containers container.Source + + nm netmap.Source + + classifier objectCore.SenderClassifier +} + +func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor { + return &extractor{ + containers: containers, + nm: nm, + classifier: objectCore.NewSenderClassifier(irFetcher, nm, log), + } +} + +func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error { + currentEpoch, err := e.nm.Epoch(ctx) + if err != nil { + return errors.New("can't fetch current epoch") + } + if sessionToken.ExpiredAt(currentEpoch) { + return new(apistatus.SessionTokenExpired) + } + if sessionToken.InvalidAt(currentEpoch) { + return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch) + } + if !assertVerb(*sessionToken, method) { + return errInvalidVerb + } + return nil +} + +func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) { + cnr, err := e.containers.Get(ctx, m.Container) + if err != nil { + return ri, err + } + + if m.SessionToken != nil { + if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil { + return ri, err + } + } + + ownerID, ownerKey, err := m.RequestOwner() + if err != nil { + return ri, err + } + res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value) + if err != nil { + return ri, err + } + + ri.Role = nativeSchemaRole(res.Role) + ri.ContainerOwner = cnr.Value.Owner() + + ri.ContainerAttributes = map[string]string{} + for key, val := range cnr.Value.Attributes() { + ri.ContainerAttributes[key] = val + } + + cnrNamespace, hasNamespace := 
strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") + if hasNamespace { + ri.Namespace = cnrNamespace + } + + // the key is assumed to be valid at this point, + // otherwise the request would not have passed validation + ri.SenderKey = hex.EncodeToString(res.Key) + + return ri, nil +} + +func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { + var sTok *sessionSDK.Object + + if tokV2 != nil { + sTok = new(sessionSDK.Object) + + err := sTok.ReadFromV2(*tokV2) + if err != nil { + return nil, fmt.Errorf("invalid session token: %w", err) + } + + if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { + // if the session relates to an object's removal, we don't check + // the relation of the tombstone to the session here, since the user + // can't predict the tombstone's ID. + err = assertSessionRelation(*sTok, cnr, nil) + } else { + err = assertSessionRelation(*sTok, cnr, obj) + } + + if err != nil { + return nil, err + } + } + + return sTok, nil +} diff --git a/pkg/services/object/acl/v2/request_test.go b/pkg/services/object/ape/metadata_test.go similarity index 79% rename from pkg/services/object/acl/v2/request_test.go rename to pkg/services/object/ape/metadata_test.go index 980d1a2e5..fd919008f 100644 --- a/pkg/services/object/acl/v2/request_test.go +++ b/pkg/services/object/ape/metadata_test.go @@ -1,11 +1,11 @@ -package v2 +package ape import ( "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/signature" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" @@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) { vh.SetBodySignature(&userSignature) t.Run("empty verification header", func(t *testing.T) { - req := MetaWithToken{} + req := Metadata{} checkOwner(t, req, nil, errEmptyVerificationHeader) }) t.Run("empty verification header signature", func(t *testing.T) { - req := MetaWithToken{ - vheader: new(sessionV2.RequestVerificationHeader), + req := Metadata{ + VerificationHeader: new(sessionV2.RequestVerificationHeader), } checkOwner(t, req, nil, errEmptyBodySig) }) t.Run("no tokens", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, + req := Metadata{ + VerificationHeader: vh, } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer without impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, false), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, false), } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer with impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) { pk, err := keys.NewPrivateKey() require.NoError(t, err) - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), -
token: newSession(t, pk), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), + SessionToken: newSession(t, pk), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) t.Run("with session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - token: newSession(t, containerOwner), + req := Metadata{ + VerificationHeader: vh, + SessionToken: newSession(t, containerOwner), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) { var tok sessionSDK.Object require.NoError(t, tok.ReadFromV2(tokV2)) - req := MetaWithToken{ - vheader: vh, - token: &tok, + req := Metadata{ + VerificationHeader: vh, + SessionToken: &tok, } checkOwner(t, req, nil, errInvalidSessionOwner) }) @@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool return &tok } -func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) { +func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) { _, actual, err := req.RequestOwner() if expectedErr != nil { require.ErrorIs(t, err, expectedErr) diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index 1c129f65f..39dd7f476 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -3,14 +3,16 @@ package ape import ( "context" "crypto/sha256" + "errors" "fmt" "net" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -24,6 +26,8 @@ import ( var defaultRequest = aperequest.Request{} +var errECMissingParentObjectID = errors.New("missing EC parent object ID") + func nativeSchemaRole(role acl.Role) string { switch role { case acl.RoleOwner: @@ -53,11 +57,16 @@ func resourceName(cid cid.ID, oid *oid.ID, namespace string) string { } // objectProperties collects object properties from address parameters and a header if it is passed. 
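The notable change to objectProperties below is the new cnrAttrs parameter: container attributes are folded into the APE resource properties. A minimal sketch of the mapping, with cnr, cnrOwner, and cnrAttrs standing in for the checker's inputs:

```go
// Each container attribute becomes a matchable resource property keyed by
// the PropertyKeyFormatObjectContainerAttribute format string, alongside
// the usual container ID and owner properties.
props := map[string]string{
	nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
	nativeschema.PropertyKeyContainerOwnerID:  cnrOwner.EncodeToString(),
}
for name, value := range cnrAttrs {
	props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, name)] = value
}
```

This is what lets chain rules condition on container attributes such as the system Name/Zone attributes exercised in request_test.go below.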
-func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string { +func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string { objectProps := map[string]string{ nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(), } + for attrName, attrValue := range cnrAttrs { + prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName) + objectProps[prop] = attrValue + } + objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString() if oid != nil { @@ -103,7 +112,8 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re nativeschema.MethodHeadObject, nativeschema.MethodRangeObject, nativeschema.MethodHashObject, - nativeschema.MethodDeleteObject: + nativeschema.MethodDeleteObject, + nativeschema.MethodPatchObject: if prm.Object == nil { return defaultRequest, fmt.Errorf("method %s: %w", prm.Method, errMissingOID) } @@ -115,13 +125,16 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re var header *objectV2.Header if prm.Header != nil { header = prm.Header - } else if prm.Object != nil && !prm.WithoutHeaderRequest { + } else if prm.Object != nil { headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object, true) if err == nil { header = headerObjSDK.ToV2().GetHeader() } } - header = c.fillHeaderWithECParent(ctx, prm, header) + header, err := c.fillHeaderWithECParent(ctx, prm, header) + if err != nil { + return defaultRequest, fmt.Errorf("get EC parent header: %w", err) + } reqProps := map[string]string{ nativeschema.PropertyKeyActorPublicKey: prm.SenderKey, nativeschema.PropertyKeyActorRole: prm.Role, @@ -132,8 +145,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re reqProps[xheadKey] = xhead.GetValue() } - var err error - reqProps, err = c.fillWithUserClaimTags(reqProps, prm) + reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm) if err != nil { return defaultRequest, err } @@ -148,50 +160,58 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re prm.Method, aperequest.NewResource( resourceName(prm.Container, prm.Object, prm.Namespace), - objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header), + objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header), ), reqProps, ), nil } -func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) *objectV2.Header { +func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) (*objectV2.Header, error) { if header == nil { - return header + return header, nil } if header.GetEC() == nil { - return header - } - if prm.Role == nativeschema.PropertyValueContainerRoleContainer || - prm.Role == nativeschema.PropertyValueContainerRoleIR { - return header + return header, nil } parentObjRefID := header.GetEC().Parent if parentObjRefID == nil { - return header + return nil, errECMissingParentObjectID } var parentObjID oid.ID if err := parentObjID.ReadFromV2(*parentObjRefID); err != nil { - return header + return nil, fmt.Errorf("EC parent object ID format error: %w", err) } // only container nodes have access to collect the parent object - contNode, err := c.currentNodeIsContainerNode(prm.Container) - if err != nil || !contNode { - return header + contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container) + if err
!= nil { + return nil, fmt.Errorf("check container node status: %w", err) + } + if !contNode { + return header, nil } parentObj, err := c.headerProvider.GetHeader(ctx, prm.Container, parentObjID, false) if err != nil { - return header + if isLogicalError(err) { + return header, nil + } + return nil, fmt.Errorf("EC parent header request: %w", err) } - return parentObj.ToV2().GetHeader() + return parentObj.ToV2().GetHeader(), nil } -func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { - cnr, err := c.cnrSource.Get(cnrID) +func isLogicalError(err error) bool { + var errObjRemoved *apistatus.ObjectAlreadyRemoved + var errObjNotFound *apistatus.ObjectNotFound + return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound) +} + +func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) { + cnr, err := c.cnrSource.Get(ctx, cnrID) if err != nil { return false, err } - nm, err := netmap.GetLatestNetworkMap(c.nm) + nm, err := netmap.GetLatestNetworkMap(ctx, c.nm) if err != nil { return false, err } @@ -205,7 +225,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { return true, nil } - nm, err = netmap.GetPreviousNetworkMap(c.nm) + nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm) if err != nil { return false, err } @@ -214,7 +234,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. -func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) { +func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } @@ -222,7 +242,7 @@ func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) if err != nil { return nil, err } - props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go index 9dad69d17..fcf7c4c40 100644 --- a/pkg/services/object/ape/request_test.go +++ b/pkg/services/object/ape/request_test.go @@ -6,8 +6,9 @@ import ( "net" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" + cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -19,11 +20,20 @@ import ( ) const ( - testOwnerID = "FPPtmAi9TCX329" + testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y" incomingIP = "192.92.33.1" + + testSysAttrName = "unittest" + + testSysAttrZone = "eggplant" ) +var containerAttrs = map[string]string{ + cnrV2.SysAttributeName: testSysAttrName, + cnrV2.SysAttributeZone: testSysAttrZone, +} + func ctxWithPeerInfo() context.Context { return peer.NewContext(context.Background(), &peer.Peer{ Addr: &net.TCPAddr{ @@ -105,7 +115,7 @@ func TestObjectProperties(t *testing.T) { var testCnrOwner user.ID require.NoError(t, 
testCnrOwner.DecodeString(testOwnerID)) - props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader()) + props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader()) require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID]) require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID]) @@ -124,6 +134,8 @@ func TestObjectProperties(t *testing.T) { require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType]) require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash]) require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash]) + require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)]) + require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)]) for _, attr := range test.header.attributes { require.Equal(t, attr.val, props[attr.key]) @@ -245,6 +257,10 @@ func TestNewAPERequest(t *testing.T) { Role: role, SenderKey: senderKey, ContainerOwner: testCnrOwner, + ContainerAttributes: map[string]string{ + cnrV2.SysAttributeZone: testSysAttrZone, + cnrV2.SysAttributeName: testSysAttrName, + }, } headerSource := newHeaderProviderMock() @@ -277,7 +293,7 @@ func TestNewAPERequest(t *testing.T) { method, aperequest.NewResource( resourceName(cnr, obj, prm.Namespace), - objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header { + objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header { if headerObjSDK != nil { return headerObjSDK.ToV2().GetHeader() } diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index 2adb1b736..5e04843f3 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -2,32 +2,25 @@ package ape import ( "context" - "encoding/hex" - "errors" - "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" ) -var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext") - type Service struct { - log *logger.Logger - apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.ServiceServer } @@ -67,10 +60,10 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) } } -func NewService(log *logger.Logger, apeChecker Checker, next objectSvc.ServiceServer) *Service { 
+func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service { return &Service{ - log: log, apeChecker: apeChecker, + extractor: extractor, next: next, } } @@ -80,17 +73,9 @@ type getStreamBasicChecker struct { apeChecker Checker - namespace string + metadata Metadata - senderKey []byte - - containerOwner user.ID - - role string - - softAPECheck bool - - bearerToken *bearer.Token + reqInfo RequestInfo } func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { @@ -101,17 +86,17 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { } prm := Prm{ - Namespace: g.namespace, - Container: cnrID, - Object: objID, - Header: partInit.GetHeader(), - Method: nativeschema.MethodGetObject, - SenderKey: hex.EncodeToString(g.senderKey), - ContainerOwner: g.containerOwner, - Role: g.role, - SoftAPECheck: g.softAPECheck, - BearerToken: g.bearerToken, - XHeaders: resp.GetMetaHeader().GetXHeaders(), + Namespace: g.reqInfo.Namespace, + Container: cnrID, + Object: objID, + Header: partInit.GetHeader(), + Method: nativeschema.MethodGetObject, + SenderKey: g.reqInfo.SenderKey, + ContainerOwner: g.reqInfo.ContainerOwner, + ContainerAttributes: g.reqInfo.ContainerAttributes, + Role: g.reqInfo.Role, + BearerToken: g.metadata.BearerToken, + XHeaders: resp.GetMetaHeader().GetXHeaders(), } if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil { @@ -121,66 +106,54 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { return g.GetObjectStream.Send(resp) } -func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) { - untyped := ctx.Value(objectSvc.RequestContextKey) - if untyped == nil { - return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey) - } - rc, ok := untyped.(*objectSvc.RequestContext) - if !ok { - return nil, errFailedToCastToRequestContext - } - return rc, nil -} - func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error { - reqCtx, err := requestContext(stream.Context()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject) + if err != nil { + return err } - return c.next.Get(request, &getStreamBasicChecker{ GetObjectStream: stream, apeChecker: c.apeChecker, - namespace: reqCtx.Namespace, - senderKey: reqCtx.SenderKey, - containerOwner: reqCtx.ContainerOwner, - role: nativeSchemaRole(reqCtx.Role), - softAPECheck: reqCtx.SoftAPECheck, - bearerToken: reqCtx.BearerToken, + metadata: md, + reqInfo: reqInfo, }) } type putStreamBasicChecker struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.PutObjectStream } func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - reqCtx, err := requestContext(ctx) + md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { - return toStatusErr(err) + return err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: 
cnrID, - Object: objID, - Header: partInit.GetHeader(), - Method: nativeschema.MethodPutObject, - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - Role: nativeSchemaRole(reqCtx.Role), - SoftAPECheck: reqCtx.SoftAPECheck, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Header: partInit.GetHeader(), + Method: nativeschema.MethodPutObject, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -195,22 +168,80 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR return p.next.CloseAndRecv(ctx) } -func (c *Service) Put() (objectSvc.PutObjectStream, error) { - streamer, err := c.next.Put() +func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { + streamer, err := c.next.Put(ctx) return &putStreamBasicChecker{ apeChecker: c.apeChecker, + extractor: c.extractor, + next: streamer, + }, err +} + +type patchStreamBasicChecker struct { + apeChecker Checker + + extractor RequestInfoExtractor + + next objectSvc.PatchObjectStream + + nonFirstSend bool +} + +func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { + if !p.nonFirstSend { + p.nonFirstSend = true + + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + if err != nil { + return err + } + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject) + if err != nil { + return err + } + + prm := Prm{ + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodPatchObject, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), + } + + if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { + return toStatusErr(err) + } + } + + return p.next.Send(ctx, request) +} + +func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { + return p.next.CloseAndRecv(ctx) +} + +func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) { + streamer, err := c.next.Patch(ctx) + + return &patchStreamBasicChecker{ + apeChecker: c.apeChecker, + extractor: c.extractor, next: streamer, }, err } func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject) if err != nil { return nil, err } @@ -224,7 +255,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj switch headerPart := resp.GetBody().GetHeaderPart().(type) { case *objectV2.ShortHeader: cidV2 := new(refs.ContainerID) - cnrID.WriteToV2(cidV2) + 
md.Container.WriteToV2(cidV2) header.SetContainerID(cidV2) header.SetVersion(headerPart.GetVersion()) header.SetCreationEpoch(headerPart.GetCreationEpoch()) @@ -240,17 +271,17 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Header: header, - Method: nativeschema.MethodHeadObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Header: header, + Method: nativeschema.MethodHeadObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -259,28 +290,25 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error { - var cnrID cid.ID - if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil { - if err := cnrID.ReadFromV2(*cnrV2); err != nil { - return toStatusErr(err) - } - } - - reqCtx, err := requestContext(stream.Context()) + md, err := newMetadata(request, request.GetBody().GetContainerID(), nil) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject) + if err != nil { + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Method: nativeschema.MethodSearchObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Method: nativeschema.MethodSearchObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -290,27 +318,26 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc } func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject) if err != nil { return nil, err } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Method: nativeschema.MethodDeleteObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, - BearerToken: 
reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodDeleteObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -325,27 +352,26 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( } func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - reqCtx, err := requestContext(stream.Context()) + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject) if err != nil { - return toStatusErr(err) + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Method: nativeschema.MethodRangeObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodRangeObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -355,27 +381,26 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G } func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Method: nativeschema.MethodHashObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodHashObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } resp, err := c.next.GetRangeHash(ctx, request) @@ -390,28 +415,27 @@ func (c *Service) GetRangeHash(ctx context.Context, request 
*objectV2.GetRangeHa } func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Header: request.GetBody().GetObject().GetHeader(), - Method: nativeschema.MethodPutObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Header: request.GetBody().GetObject().GetHeader(), + Method: nativeschema.MethodPutObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -421,18 +445,36 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ return c.next.PutSingle(ctx, request) } -func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { - if cidV2 != nil { - if err = cnrID.ReadFromV2(*cidV2); err != nil { - return - } +type request interface { + GetMetaHeader() *session.RequestMetaHeader + GetVerificationHeader() *session.RequestVerificationHeader +} + +func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin } - if objV2 != nil { - objID = new(oid.ID) - if err = objID.ReadFromV2(*objV2); err != nil { - return - } + cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2) + if err != nil { + return + } + session, err := readSessionToken(cnrID, objID, meta.GetSessionToken()) + if err != nil { + return + } + bearer, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return + } + + md = Metadata{ + Container: cnrID, + Object: objID, + VerificationHeader: request.GetVerificationHeader(), + SessionToken: session, + BearerToken: bearer, } return } diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go index 46e55360d..97dbfa658 100644 --- a/pkg/services/object/ape/types.go +++ b/pkg/services/object/ape/types.go @@ -7,3 +7,11 @@ import "context" type Checker interface { CheckAPE(context.Context, Prm) error } + +// InnerRingFetcher is an interface that must provide +// Inner Ring information. +type InnerRingFetcher interface { + // InnerRingKeys must return list of public keys of + // the actual inner ring. 
+ InnerRingKeys(ctx context.Context) ([][]byte, error) +} diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/ape/util.go similarity index 55% rename from pkg/services/object/acl/v2/util.go rename to pkg/services/object/ape/util.go index feda6a3cf..5cd2caa50 100644 --- a/pkg/services/object/acl/v2/util.go +++ b/pkg/services/object/ape/util.go @@ -1,4 +1,4 @@ -package v2 +package ape import ( "crypto/ecdsa" @@ -6,55 +6,34 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) -var errMissingContainerID = errors.New("missing container ID") - -func getContainerIDFromRequest(req any) (cid.ID, error) { - var idV2 *refsV2.ContainerID - var id cid.ID - - switch v := req.(type) { - case *objectV2.GetRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutRequest: - part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit) - if !ok { - return cid.ID{}, errors.New("can't get container ID in chunk") +func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { + if cidV2 != nil { + if err = cnrID.ReadFromV2(*cidV2); err != nil { + return } - - idV2 = part.GetHeader().GetContainerID() - case *objectV2.HeadRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.SearchRequest: - idV2 = v.GetBody().GetContainerID() - case *objectV2.DeleteRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeHashRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutSingleRequest: - idV2 = v.GetBody().GetObject().GetHeader().GetContainerID() - default: - return cid.ID{}, errors.New("unknown request type") + } else { + err = errMissingContainerID + return } - if idV2 == nil { - return cid.ID{}, errMissingContainerID + if objV2 != nil { + objID = new(oid.ID) + if err = objID.ReadFromV2(*objV2); err != nil { + return + } } - - return id, id.ReadFromV2(*idV2) + return } // originalBearerToken goes down to original request meta header and fetches @@ -73,50 +52,6 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er return &tok, tok.ReadFromV2(*tokV2) } -// originalSessionToken goes down to original request meta header and fetches -// session token from there. 
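A usage sketch of the relocated getAddressParamsSDK above: the container ID is mandatory, while the object ID stays optional. Here req is a placeholder for any object request carrying an address:

```go
// The container ID must decode; a nil cidV2 yields errMissingContainerID.
// The object ID is returned as *oid.ID only when present in the request.
cnrID, objID, err := getAddressParamsSDK(
	req.GetBody().GetAddress().GetContainerID(),
	req.GetBody().GetAddress().GetObjectID(),
)
if err != nil {
	return err // errMissingContainerID or a decode error
}
_ = objID // nil for container-scoped methods such as Search
```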
-func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) { - for header.GetOrigin() != nil { - header = header.GetOrigin() - } - - tokV2 := header.GetSessionToken() - if tokV2 == nil { - return nil, nil - } - - var tok sessionSDK.Object - - err := tok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - return &tok, nil -} - -// getObjectIDFromRequestBody decodes oid.ID from the common interface of the -// object reference's holders. Returns an error if object ID is missing in the request. -func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) { - idV2 := body.GetAddress().GetObjectID() - return getObjectIDFromRefObjectID(idV2) -} - -func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) { - if idV2 == nil { - return nil, errors.New("missing object ID") - } - - var id oid.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, err - } - - return &id, nil -} - func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { // 1. First check signature of session token. if !token.VerifySignature() { @@ -170,30 +105,33 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -// assertVerb checks that token verb corresponds to op. -func assertVerb(tok sessionSDK.Object, op acl.Op) bool { - switch op { - case acl.OpObjectPut: - return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete) - case acl.OpObjectDelete: +// assertVerb checks that token verb corresponds to the method. +func assertVerb(tok sessionSDK.Object, method string) bool { + switch method { + case nativeschema.MethodPutObject: + return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) + case nativeschema.MethodDeleteObject: return tok.AssertVerb(sessionSDK.VerbObjectDelete) - case acl.OpObjectGet: + case nativeschema.MethodGetObject: return tok.AssertVerb(sessionSDK.VerbObjectGet) - case acl.OpObjectHead: + case nativeschema.MethodHeadObject: return tok.AssertVerb( sessionSDK.VerbObjectHead, sessionSDK.VerbObjectGet, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash) - case acl.OpObjectSearch: + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectPatch, + ) + case nativeschema.MethodSearchObject: return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) - case acl.OpObjectRange: - return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash) - case acl.OpObjectHash: + case nativeschema.MethodRangeObject: + return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) + case nativeschema.MethodHashObject: return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) + case nativeschema.MethodPatchObject: + return tok.AssertVerb(sessionSDK.VerbObjectPatch) } - return false } @@ -217,3 +155,15 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error return nil } + +func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { + key, err := unmarshalPublicKey(rawKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid signature key: %w", err) + } + + var idSender user.ID + user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) + + return &idSender, key, nil +} diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go new file mode 100644 index 000000000..916bce427 
--- /dev/null +++ b/pkg/services/object/ape/util_test.go @@ -0,0 +1,84 @@ +package ape + +import ( + "slices" + "testing" + + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" + "github.com/stretchr/testify/require" +) + +func TestIsVerbCompatible(t *testing.T) { + table := map[string][]sessionSDK.ObjectVerb{ + nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch}, + nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete}, + nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet}, + nativeschema.MethodHeadObject: { + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectPatch, + }, + nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch}, + nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash}, + nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, + nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch}, + } + + verbs := []sessionSDK.ObjectVerb{ + sessionSDK.VerbObjectPut, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectSearch, + sessionSDK.VerbObjectPatch, + } + + var tok sessionSDK.Object + + for op, list := range table { + for _, verb := range verbs { + contains := slices.Contains(list, verb) + + tok.ForVerb(verb) + + require.Equal(t, contains, assertVerb(tok, op), + "%v in token, %s executing", verb, op) + } + } +} + +func TestAssertSessionRelation(t *testing.T) { + var tok sessionSDK.Object + cnr := cidtest.ID() + cnrOther := cidtest.ID() + obj := oidtest.ID() + objOther := oidtest.ID() + + // make sure ids differ, otherwise test won't work correctly + require.False(t, cnrOther.Equals(cnr)) + require.False(t, objOther.Equals(obj)) + + // bind session to the container (required) + tok.BindContainer(cnr) + + // test container-global session + require.NoError(t, assertSessionRelation(tok, cnr, nil)) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnrOther, nil)) + require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) + + // limit the session to the particular object + tok.LimitByObjects(obj) + + // test fixed object session (here obj arg must be non-nil everywhere) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnr, &objOther)) +} diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index 1305fa008..f8ee089fe 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -5,12 +5,12 @@ import ( "errors" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) @@ -37,7 +37,7 @@ func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (* if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Delete_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return res, err } @@ -48,7 +48,7 @@ func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error if !a.enabled.Load() { return err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Get_FullMethodName, req, + audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return err } @@ -59,7 +59,7 @@ func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRan if !a.enabled.Load() { return err } - audit.LogRequest(a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req, + audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return err } @@ -70,7 +70,7 @@ func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHas if !a.enabled.Load() { return resp, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return resp, err } @@ -81,19 +81,19 @@ func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*obje if !a.enabled.Load() { return resp, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Head_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return resp, err } // Put implements ServiceServer. 
-func (a *auditService) Put() (PutObjectStream, error) { - res, err := a.next.Put() +func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) { + res, err := a.next.Put(ctx) if !a.enabled.Load() { return res, err } if err != nil { - audit.LogRequest(a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false) + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false) return res, err } return &auditPutStream{ @@ -108,7 +108,7 @@ func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleReque if !a.enabled.Load() { return resp, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req, audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(), req.GetBody().GetObject().GetObjectID()), err == nil) @@ -121,7 +121,7 @@ func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) er if !a.enabled.Load() { return err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Search_FullMethodName, req, + audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) return err } @@ -145,7 +145,7 @@ func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse, a.failed = true } a.objectID = resp.GetBody().GetObjectID() - audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, + audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) return resp, err @@ -163,8 +163,69 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error if err != nil { a.failed = true } - if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here - audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, + if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, + audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), + !a.failed) + } + return err +} + +type auditPatchStream struct { + stream PatchObjectStream + log *logger.Logger + + failed bool + key []byte + containerID *refs.ContainerID + objectID *refs.ObjectID + + nonFirstSend bool +} + +func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) { + res, err := a.next.Patch(ctx) + if !a.enabled.Load() { + return res, err + } + if err != nil { + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false) + return res, err + } + return &auditPatchStream{ + stream: res, + log: a.log, + }, nil +} + +// CloseAndRecv implements PatchObjectStream. +func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) { + resp, err := a.stream.CloseAndRecv(ctx) + if err != nil { + a.failed = true + } + a.objectID = resp.GetBody().GetObjectID() + audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, + audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), + !a.failed) + return resp, err +} + +// Send implements PatchObjectStream. 
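The guard added to `auditPutStream.Send` above (and to `auditPatchStream.Send` just below) is the crux of stream auditing; note the new `err != nil` half of the condition, since the old code also logged on every successful chunk. `util.ErrAbortStream` is special because the server still drives `CloseAndRecv`, which writes the single audit record; any other send error terminates the stream, making `Send` the last chance to log. Sketch, with `method` and `target` as placeholders:

```go
// Where an audited client stream gets logged:
//   Send succeeds, CloseAndRecv reached  -> logged once in CloseAndRecv
//   Send fails with util.ErrAbortStream  -> CloseAndRecv still runs and logs
//   Send fails with any other error      -> CloseAndRecv never runs; log here
if err != nil && !errors.Is(err, util.ErrAbortStream) {
	audit.LogRequestWithKey(ctx, a.log, method, a.key, target, false)
}
```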
+func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) error { + if !a.nonFirstSend { + a.containerID = req.GetBody().GetAddress().GetContainerID() + a.objectID = req.GetBody().GetAddress().GetObjectID() + a.key = req.GetVerificationHeader().GetBodySignature().GetKey() + a.nonFirstSend = true + } + + err := a.stream.Send(ctx, req) + if err != nil { + a.failed = true + } + if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) } diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go index 73ee9f81b..ef65e78bc 100644 --- a/pkg/services/object/common.go +++ b/pkg/services/object/common.go @@ -3,7 +3,7 @@ package object import ( "context" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) @@ -40,12 +40,20 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error { return x.nextHandler.Get(req, stream) } -func (x *Common) Put() (PutObjectStream, error) { +func (x *Common) Put(ctx context.Context) (PutObjectStream, error) { if x.state.IsMaintenance() { return nil, new(apistatus.NodeUnderMaintenance) } - return x.nextHandler.Put() + return x.nextHandler.Put(ctx) +} + +func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) { + if x.state.IsMaintenance() { + return nil, new(apistatus.NodeUnderMaintenance) + } + + return x.nextHandler.Patch(ctx) } func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { diff --git a/pkg/services/object/put/builder.go b/pkg/services/object/common/target/builder.go similarity index 98% rename from pkg/services/object/put/builder.go rename to pkg/services/object/common/target/builder.go index 64baf4e05..ea68365a7 100644 --- a/pkg/services/object/put/builder.go +++ b/pkg/services/object/common/target/builder.go @@ -1,4 +1,4 @@ -package putsvc +package target import ( "context" diff --git a/pkg/services/object/put/pool.go b/pkg/services/object/common/target/pool.go similarity index 96% rename from pkg/services/object/put/pool.go rename to pkg/services/object/common/target/pool.go index ebe214caf..71da305ad 100644 --- a/pkg/services/object/put/pool.go +++ b/pkg/services/object/common/target/pool.go @@ -1,4 +1,4 @@ -package putsvc +package target import ( "sync" diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go new file mode 100644 index 000000000..f2bd907db --- /dev/null +++ b/pkg/services/object/common/target/target.go @@ -0,0 +1,168 @@ +package target + +import ( + "context" + "errors" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" +) + +func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { + // prepare needed put 
parameters + if err := preparePrm(ctx, &prm); err != nil { + return nil, fmt.Errorf("could not prepare put parameters: %w", err) + } + + if prm.Header.Signature() != nil { + return newUntrustedTarget(ctx, &prm) + } + return newTrustedTarget(ctx, &prm) +} + +func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { + maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) + if maxPayloadSz == 0 { + return nil, errors.New("could not obtain max object size parameter") + } + + if prm.SignRequestPrivateKey == nil { + nodeKey, err := prm.Config.KeyStorage.GetKey(nil) + if err != nil { + return nil, err + } + prm.SignRequestPrivateKey = nodeKey + } + + // prepare untrusted-Put object target + return &validatingPreparedTarget{ + nextTarget: newInMemoryObjectBuilder(objectwriter.New(prm)), + fmt: prm.Config.FormatValidator, + + maxPayloadSz: maxPayloadSz, + }, nil +} + +func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { + prm.Relay = nil // do not relay request without signature + maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) + if maxPayloadSz == 0 { + return nil, errors.New("could not obtain max object size parameter") + } + + sToken := prm.Common.SessionToken() + + // prepare trusted-Put object target + + // get private token from local storage + var sessionInfo *util.SessionInfo + + if sToken != nil { + sessionInfo = &util.SessionInfo{ + ID: sToken.ID(), + Owner: sToken.Issuer(), + } + } + + key, err := prm.Config.KeyStorage.GetKey(sessionInfo) + if err != nil { + return nil, fmt.Errorf("could not receive session key: %w", err) + } + + // In case session token is missing, the line above returns the default key. + // If it isn't owner key, replication attempts will fail, thus this check. 
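The check referred to by the comment above follows in the next hunk. Restated as a standalone helper (name and shape hypothetical), the trusted path accepts exactly two owner configurations:

```go
// validateOwner: without a session token the object owner must match the
// node's default key; with one, it must match the token issuer.
func validateOwner(owner user.ID, sessionOwner *user.ID, defaultKey ecdsa.PublicKey) error {
	if sessionOwner == nil {
		var def user.ID
		user.IDFromKey(&def, defaultKey)
		if !owner.Equals(def) {
			return errors.New("object owner differs from the default node key")
		}
		return nil
	}
	if !owner.Equals(*sessionOwner) {
		return errors.New("object owner differs from the session token issuer")
	}
	return nil
}
```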
+ ownerObj := prm.Header.OwnerID() + if ownerObj.IsEmpty() { + return nil, errors.New("missing object owner") + } + + if sToken == nil { + var ownerSession user.ID + user.IDFromKey(&ownerSession, key.PublicKey) + + if !ownerObj.Equals(ownerSession) { + return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession) + } + } else if !ownerObj.Equals(sessionInfo.Owner) { + return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) + } + + if prm.SignRequestPrivateKey == nil { + prm.SignRequestPrivateKey = key + } + + return &validatingTarget{ + fmt: prm.Config.FormatValidator, + nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{ + Key: key, + NextTargetInit: func() transformer.ObjectWriter { return objectwriter.New(prm) }, + NetworkState: prm.Config.NetworkState, + MaxSize: maxPayloadSz, + WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.Container), + SessionToken: sToken, + }), + }, nil +} + +func preparePrm(ctx context.Context, prm *objectwriter.Params) error { + var err error + + // get latest network map + nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource) + if err != nil { + return fmt.Errorf("could not get latest network map: %w", err) + } + + idCnr, ok := prm.Header.ContainerID() + if !ok { + return errors.New("missing container ID") + } + + // get container to store the object + cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr) + if err != nil { + return fmt.Errorf("could not get container by ID: %w", err) + } + + prm.Container = cnrInfo.Value + + // add common options + prm.TraverseOpts = append(prm.TraverseOpts, + // set processing container + placement.ForContainer(prm.Container), + ) + + if ech := prm.Header.ECHeader(); ech != nil { + prm.TraverseOpts = append(prm.TraverseOpts, + // set identifier of the processing object + placement.ForObject(ech.Parent()), + ) + } else if id, ok := prm.Header.ID(); ok { + prm.TraverseOpts = append(prm.TraverseOpts, + // set identifier of the processing object + placement.ForObject(id), + ) + } + + // create placement builder from network map + builder := placement.NewNetworkMapBuilder(nm) + + if prm.Common.LocalOnly() { + // restrict success count to 1 stored copy (to local storage) + prm.TraverseOpts = append(prm.TraverseOpts, placement.SuccessAfter(1)) + + // use local-only placement builder + builder = util.NewLocalPlacement(builder, prm.Config.NetmapKeys) + } + + // set placement builder + prm.TraverseOpts = append(prm.TraverseOpts, placement.UseBuilder(builder)) + + return nil +} diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/common/target/validation.go similarity index 99% rename from pkg/services/object/put/validation.go rename to pkg/services/object/common/target/validation.go index c2b078ef5..b29721d01 100644 --- a/pkg/services/object/put/validation.go +++ b/pkg/services/object/common/target/validation.go @@ -1,4 +1,4 @@ -package putsvc +package target import ( "bytes" diff --git a/pkg/services/object/put/common.go b/pkg/services/object/common/writer/common.go similarity index 61% rename from pkg/services/object/put/common.go rename to pkg/services/object/common/writer/common.go index 6696a192b..6593d3ca0 100644 --- a/pkg/services/object/put/common.go +++ b/pkg/services/object/common/writer/common.go @@ -1,4 +1,4 @@ -package putsvc +package writer import ( "context" @@ -13,23 +13,23 @@ import ( "go.uber.org/zap" ) -type 
nodeIterator struct { - traversal - cfg *cfg +type NodeIterator struct { + Traversal + cfg *Config } -func (c *cfg) newNodeIterator(opts []placement.Option) *nodeIterator { - return &nodeIterator{ - traversal: traversal{ - opts: opts, - mExclude: make(map[string]*bool), +func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator { + return &NodeIterator{ + Traversal: Traversal{ + Opts: opts, + Exclude: make(map[string]*bool), }, cfg: c, } } -func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, nodeDesc) error) error { - traverser, err := placement.NewTraverser(n.traversal.opts...) +func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error { + traverser, err := placement.NewTraverser(ctx, n.Opts...) if err != nil { return fmt.Errorf("could not create object placement traverser: %w", err) } @@ -56,10 +56,10 @@ func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, } // perform additional container broadcast if needed - if n.traversal.submitPrimaryPlacementFinish() { - err := n.forEachNode(ctx, f) + if n.submitPrimaryPlacementFinish() { + err := n.ForEachNode(ctx, f) if err != nil { - n.cfg.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) + n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) // we don't fail primary operation because of broadcast failure } } @@ -67,12 +67,11 @@ func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, return nil } -func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, nodeDesc) error, resErr *atomic.Value) bool { +func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, NodeDescriptor) error, resErr *atomic.Value) bool { var wg sync.WaitGroup for _, addr := range addrs { - addr := addr - if ok := n.mExclude[string(addr.PublicKey())]; ok != nil { + if ok := n.Exclude[string(addr.PublicKey())]; ok != nil { if *ok { traverser.SubmitSuccess() } @@ -80,33 +79,29 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement. continue } - workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey()) + isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey()) item := new(bool) wg.Add(1) - if err := workerPool.Submit(func() { + go func() { defer wg.Done() - err := f(ctx, nodeDesc{local: isLocal, info: addr}) + err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr}) if err != nil { resErr.Store(err) - svcutil.LogServiceError(n.cfg.log, "PUT", addr.Addresses(), err) + svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err) return } traverser.SubmitSuccess() *item = true - }); err != nil { - wg.Done() - svcutil.LogWorkerPoolError(n.cfg.log, "PUT", err) - return true - } + }() // Mark the container node as processed in order to exclude it // in subsequent container broadcast. Note that we don't // process this node during broadcast if primary placement // on it failed. - n.traversal.submitProcessed(addr, item) + n.submitProcessed(addr, item) } wg.Wait() @@ -114,6 +109,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement. 
return false } -func needAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool { +func NeedAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool { return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock)) } diff --git a/pkg/services/object/put/writer.go b/pkg/services/object/common/writer/dispatcher.go similarity index 97% rename from pkg/services/object/put/writer.go rename to pkg/services/object/common/writer/dispatcher.go index 53eee6006..bb9a54ce9 100644 --- a/pkg/services/object/put/writer.go +++ b/pkg/services/object/common/writer/dispatcher.go @@ -1,4 +1,4 @@ -package putsvc +package writer import ( "context" diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/common/writer/distributed.go similarity index 54% rename from pkg/services/object/put/distributed.go rename to pkg/services/object/common/writer/distributed.go index c71427b67..fff58aca7 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/common/writer/distributed.go @@ -1,4 +1,4 @@ -package putsvc +package writer import ( "context" @@ -13,39 +13,47 @@ type preparedObjectTarget interface { WriteObject(context.Context, *objectSDK.Object, object.ContentMeta) error } -type distributedTarget struct { +type distributedWriter struct { + cfg *Config + placementOpts []placement.Option obj *objectSDK.Object objMeta object.ContentMeta - *cfg + nodeTargetInitializer func(NodeDescriptor) preparedObjectTarget - nodeTargetInitializer func(nodeDesc) preparedObjectTarget + relay func(context.Context, NodeDescriptor) error - relay func(context.Context, nodeDesc) error + resetSuccessAfterOnBroadcast bool } -// parameters and state of container traversal. -type traversal struct { - opts []placement.Option +// Traversal parameters and state of container. +type Traversal struct { + Opts []placement.Option // need of additional broadcast after the object is saved - extraBroadcastEnabled bool + ExtraBroadcastEnabled bool // container nodes which was processed during the primary object placement - mExclude map[string]*bool + Exclude map[string]*bool + + ResetSuccessAfterOnBroadcast bool } // updates traversal parameters after the primary placement finish and // returns true if additional container broadcast is needed. -func (x *traversal) submitPrimaryPlacementFinish() bool { - if x.extraBroadcastEnabled { +func (x *Traversal) submitPrimaryPlacementFinish() bool { + if x.ExtraBroadcastEnabled { // do not track success during container broadcast (best-effort) - x.opts = append(x.opts, placement.WithoutSuccessTracking()) + x.Opts = append(x.Opts, placement.WithoutSuccessTracking()) + + if x.ResetSuccessAfterOnBroadcast { + x.Opts = append(x.Opts, placement.ResetSuccessAfter()) + } // avoid 2nd broadcast - x.extraBroadcastEnabled = false + x.ExtraBroadcastEnabled = false return true } @@ -54,22 +62,22 @@ func (x *traversal) submitPrimaryPlacementFinish() bool { } // marks the container node as processed during the primary object placement. 
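Putting the newly exported pieces together, the intended call pattern for `NodeIterator` is roughly the following sketch, where `sendObject` is any `func(context.Context, NodeDescriptor) error`:

```go
it := cfg.NewNodeIterator(placementOpts)
// Objects with children, tombstones and locks are re-broadcast to the
// remaining container nodes once the primary placement succeeds.
it.ExtraBroadcastEnabled = NeedAdditionalBroadcast(obj, localOnly)
if err := it.ForEachNode(ctx, sendObject); err != nil {
	return err
}
```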
-func (x *traversal) submitProcessed(n placement.Node, item *bool) { - if x.extraBroadcastEnabled { +func (x *Traversal) submitProcessed(n placement.Node, item *bool) { + if x.ExtraBroadcastEnabled { key := string(n.PublicKey()) - if x.mExclude == nil { - x.mExclude = make(map[string]*bool, 1) + if x.Exclude == nil { + x.Exclude = make(map[string]*bool, 1) } - x.mExclude[key] = item + x.Exclude[key] = item } } -type nodeDesc struct { - local bool +type NodeDescriptor struct { + Local bool - info placement.Node + Info placement.Node } // errIncompletePut is returned if processing on a container fails. @@ -87,20 +95,24 @@ func (x errIncompletePut) Error() string { return commonMsg } +func (x errIncompletePut) Unwrap() error { + return x.singleErr +} + // WriteObject implements the transformer.ObjectWriter interface. -func (t *distributedTarget) WriteObject(ctx context.Context, obj *objectSDK.Object) error { +func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { t.obj = obj var err error - if t.objMeta, err = t.fmtValidator.ValidateContent(t.obj); err != nil { + if t.objMeta, err = t.cfg.FormatValidator.ValidateContent(t.obj); err != nil { return fmt.Errorf("(%T) could not validate payload content: %w", t, err) } return t.iteratePlacement(ctx) } -func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error { - if !node.local && t.relay != nil { +func (t *distributedWriter) sendObject(ctx context.Context, node NodeDescriptor) error { + if !node.Local && t.relay != nil { return t.relay(ctx, node) } @@ -113,10 +125,11 @@ func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error return nil } -func (t *distributedTarget) iteratePlacement(ctx context.Context) error { +func (t *distributedWriter) iteratePlacement(ctx context.Context) error { id, _ := t.obj.ID() - iter := t.cfg.newNodeIterator(append(t.placementOpts, placement.ForObject(id))) - iter.extraBroadcastEnabled = needAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */) - return iter.forEachNode(ctx, t.sendObject) + iter := t.cfg.NewNodeIterator(append(t.placementOpts, placement.ForObject(id))) + iter.ExtraBroadcastEnabled = NeedAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */) + iter.ResetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast + return iter.ForEachNode(ctx, t.sendObject) } diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/common/writer/ec.go similarity index 51% rename from pkg/services/object/put/ec.go rename to pkg/services/object/common/writer/ec.go index 6da50195e..26a53e315 100644 --- a/pkg/services/object/put/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -1,4 +1,4 @@ -package putsvc +package writer import ( "context" @@ -14,32 +14,39 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" "go.uber.org/zap" "golang.org/x/sync/errgroup" ) -var _ transformer.ObjectWriter = (*ecWriter)(nil) 
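`errIncompletePut` gaining `Unwrap` is a small but caller-visible change: `errors.Is` and `errors.As` now see through the wrapper to `singleErr`. For example, with the sentinel introduced in the hunk below:

```go
err := w.WriteObject(ctx, obj)
// Previously this required string matching; Unwrap makes it structural.
if errors.Is(err, errFailedToSaveAllECParts) {
	// not every EC part reached a node; caller may retry or surface it
}
```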
+var _ transformer.ObjectWriter = (*ECWriter)(nil) -var errUnsupportedECObject = errors.New("object is not supported for erasure coding") +var ( + errUnsupportedECObject = errors.New("object is not supported for erasure coding") + errFailedToSaveAllECParts = errors.New("failed to save all EC parts") +) -type ecWriter struct { - cfg *cfg - placementOpts []placement.Option - container containerSDK.Container - key *ecdsa.PrivateKey - commonPrm *svcutil.CommonPrm - relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error +type ECWriter struct { + Config *Config + PlacementOpts []placement.Option + Container containerSDK.Container + Key *ecdsa.PrivateKey + CommonPrm *svcutil.CommonPrm + Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error - objMeta object.ContentMeta - objMetaValid bool + ObjectMeta object.ContentMeta + ObjectMetaValid bool + + remoteRequestSignKey *ecdsa.PrivateKey } -func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { - relayed, err := e.relayIfNotContainerNode(ctx) +func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { + relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj) if err != nil { return err } @@ -52,11 +59,23 @@ func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error return errUnsupportedECObject } - if !e.objMetaValid { - if e.objMeta, err = e.cfg.fmtValidator.ValidateContent(obj); err != nil { + if !e.ObjectMetaValid { + if e.ObjectMeta, err = e.Config.FormatValidator.ValidateContent(obj); err != nil { return fmt.Errorf("(%T) could not validate payload content: %w", e, err) } - e.objMetaValid = true + e.ObjectMetaValid = true + } + + if isContainerNode { + restoreTokens := e.CommonPrm.ForgetTokens() + defer restoreTokens() + // The request is being executed on a container node, so sign it with the container key. + e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil) + if err != nil { + return err + } + } else { + e.remoteRequestSignKey = e.Key } if obj.ECHeader() != nil { @@ -65,26 +84,32 @@ func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error return e.writeRawObject(ctx, obj) } -func (e *ecWriter) relayIfNotContainerNode(ctx context.Context) (bool, error) { - if e.relay == nil { - return false, nil - } - currentNodeIsContainerNode, err := e.currentNodeIsContainerNode() +func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) { + currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx) if err != nil { - return false, err + return false, false, err } if currentNodeIsContainerNode { // object can be split or saved locally - return false, nil + return false, true, nil } - if err := e.relayToContainerNode(ctx); err != nil { - return false, err + if e.Relay == nil { + return false, currentNodeIsContainerNode, nil } - return true, nil + objID := object.AddressOf(obj).Object() + var index uint32 + if obj.ECHeader() != nil { + objID = obj.ECHeader().Parent() + index = obj.ECHeader().Index() + } + if err := e.relayToContainerNode(ctx, objID, index); err != nil { + return false, false, err + } + return true, currentNodeIsContainerNode, nil } -func (e *ecWriter) currentNodeIsContainerNode() (bool, error) { - t, err := placement.NewTraverser(e.placementOpts...) +func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) { + t, err := placement.NewTraverser(ctx, e.PlacementOpts...)
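One subtlety in the rewritten `WriteObject`: when the request is already on a container node, `CommonPrm.ForgetTokens()` strips the client's session and bearer tokens for the internal sub-requests, since those hops are re-signed with the container node's key and the original tokens would not verify against it. The selection rule, condensed:

```go
if isContainerNode {
	restore := e.CommonPrm.ForgetTokens() // drop client tokens for internal hops
	defer restore()
	e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil) // container node key
} else {
	e.remoteRequestSignKey = e.Key // key provided by the original client
}
```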
if err != nil { return false, err } @@ -94,7 +119,7 @@ func (e *ecWriter) currentNodeIsContainerNode() (bool, error) { break } for _, node := range nodes { - if e.cfg.netmapKeys.IsLocalKey(node.PublicKey()) { + if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) { return true, nil } } @@ -102,41 +127,33 @@ func (e *ecWriter) currentNodeIsContainerNode() (bool, error) { return false, nil } -func (e *ecWriter) relayToContainerNode(ctx context.Context) error { - t, err := placement.NewTraverser(e.placementOpts...) +func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error { + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) if err != nil { return err } var lastErr error + offset := int(index) for { nodes := t.Next() if len(nodes) == 0 { break } - for _, node := range nodes { + for idx := range nodes { + node := nodes[(idx+offset)%len(nodes)] var info client.NodeInfo client.NodeInfoFromNetmapElement(&info, node) - c, err := e.cfg.clientConstructor.Get(info) + c, err := e.Config.ClientConstructor.Get(info) if err != nil { return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } - completed := make(chan interface{}) - if poolErr := e.cfg.remotePool.Submit(func() { - defer close(completed) - err = e.relay(ctx, info, c) - }); poolErr != nil { - close(completed) - svcutil.LogWorkerPoolError(e.cfg.log, "PUT", poolErr) - return poolErr - } - <-completed - + err = e.Relay(ctx, info, c) if err == nil { return nil } - e.cfg.log.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup())) + e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup())) lastErr = err } } @@ -148,8 +165,12 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context) error { } } -func (e *ecWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error { - t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(obj.ECHeader().Parent()))...) +func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error { + if e.CommonPrm.LocalOnly() { + return e.writePartLocal(ctx, obj) + } + + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...) if err != nil { return err } @@ -174,24 +195,25 @@ func (e *ecWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error return nil } -func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error { +func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error { // now only single EC policy is supported - c, err := erasurecode.NewConstructor(policy.ECDataCount(e.container.PlacementPolicy()), policy.ECParityCount(e.container.PlacementPolicy())) + c, err := erasurecode.NewConstructor(policy.ECDataCount(e.Container.PlacementPolicy()), policy.ECParityCount(e.Container.PlacementPolicy())) if err != nil { return err } - parts, err := c.Split(obj, e.key) + parts, err := c.Split(obj, e.Key) if err != nil { return err } + partsProcessed := make([]atomic.Bool, len(parts)) objID, _ := obj.ID() - t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(objID))...) + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) 
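The `nodes[(idx+offset)%len(nodes)]` rotation in `relayToContainerNode` above deserves a note: seeding the scan with the EC chunk index spreads relayed parts across the candidate nodes instead of always starting at the first traverser result. In isolation, with `relayTo` as a hypothetical helper:

```go
offset := int(index) // EC chunk index
for idx := range nodes {
	// chunk 0 tries nodes 0,1,2,3; chunk 1 tries 1,2,3,0; and so on
	node := nodes[(idx+offset)%len(nodes)]
	if err := relayTo(ctx, node); err == nil {
		break
	}
}
```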
if err != nil { return err } - eg, egCtx := errgroup.WithContext(ctx) for { + eg, egCtx := errgroup.WithContext(ctx) nodes := t.Next() if len(nodes) == 0 { break @@ -203,22 +225,35 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er } for idx := range parts { - idx := idx - eg.Go(func() error { - return e.writePart(egCtx, parts[idx], idx, nodes, visited) - }) - t.SubmitSuccess() + if !partsProcessed[idx].Load() { + eg.Go(func() error { + err := e.writePart(egCtx, parts[idx], idx, nodes, visited) + if err == nil { + partsProcessed[idx].Store(true) + t.SubmitSuccess() + } + return err + }) + } } + err = eg.Wait() } - if err := eg.Wait(); err != nil { + if err != nil { return errIncompletePut{ singleErr: err, } } + for idx := range partsProcessed { + if !partsProcessed[idx].Load() { + return errIncompletePut{ + singleErr: errFailedToSaveAllECParts, + } + } + } return nil } -func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error { +func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error { select { case <-ctx.Done(): return ctx.Err() @@ -230,8 +265,10 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx err := e.putECPartToNode(ctx, obj, node) if err == nil { return nil + } else if clientSDK.IsErrObjectAlreadyRemoved(err) { + return err } - e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), + e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) @@ -255,7 +292,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx if err == nil { return nil } - e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), + e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) @@ -264,7 +301,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx } // try to save to any node not visited by current part - for i := 0; i < len(nodes); i++ { + for i := range nodes { select { case <-ctx.Done(): return ctx.Err() @@ -279,7 +316,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx if err == nil { return nil } - e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), + e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) @@ -288,50 +325,31 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx return fmt.Errorf("failed to save EC chunk %s to any node", object.AddressOf(obj)) } -func (e *ecWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { - if e.cfg.netmapKeys.IsLocalKey(node.PublicKey()) { +func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node 
placement.Node) error { + if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) { return e.writePartLocal(ctx, obj) } return e.writePartRemote(ctx, obj, node) } -func (e *ecWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error { - var err error - localTarget := localTarget{ - storage: e.cfg.localStore, +func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error { + localTarget := LocalTarget{ + Storage: e.Config.LocalStore, + Container: e.Container, } - completed := make(chan interface{}) - if poolErr := e.cfg.localPool.Submit(func() { - defer close(completed) - err = localTarget.WriteObject(ctx, obj, e.objMeta) - }); poolErr != nil { - close(completed) - return poolErr - } - <-completed - return err + return localTarget.WriteObject(ctx, obj, e.ObjectMeta) } -func (e *ecWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { +func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { var clientNodeInfo client.NodeInfo client.NodeInfoFromNetmapElement(&clientNodeInfo, node) - remoteTaget := remoteTarget{ - privateKey: e.key, - clientConstructor: e.cfg.clientConstructor, - commonPrm: e.commonPrm, + remoteTarget := remoteWriter{ + privateKey: e.remoteRequestSignKey, + clientConstructor: e.Config.ClientConstructor, + commonPrm: e.CommonPrm, nodeInfo: clientNodeInfo, } - var err error - completed := make(chan interface{}) - if poolErr := e.cfg.remotePool.Submit(func() { - defer close(completed) - err = remoteTaget.WriteObject(ctx, obj, e.objMeta) - }); poolErr != nil { - close(completed) - return poolErr - } - <-completed - return err + return remoteTarget.WriteObject(ctx, obj, e.ObjectMeta) } diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go new file mode 100644 index 000000000..d5eeddf21 --- /dev/null +++ b/pkg/services/object/common/writer/ec_test.go @@ -0,0 +1,190 @@ +package writer + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/sha256" + "errors" + "fmt" + "slices" + "strconv" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" + apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" + "git.frostfs.info/TrueCloudLab/tzhash/tz" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +type testPlacementBuilder struct { + vectors [][]netmap.NodeInfo +} + +func (p
*testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( + [][]netmap.NodeInfo, error, +) { + arr := slices.Clone(p.vectors[0]) + return [][]netmap.NodeInfo{arr}, nil +} + +type nmKeys struct{} + +func (nmKeys) IsLocalKey(_ []byte) bool { + return false +} + +type clientConstructor struct { + vectors [][]netmap.NodeInfo +} + +func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) { + if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) || + bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) { + return multiAddressClient{err: errors.New("node unavailable")}, nil + } + return multiAddressClient{}, nil +} + +type multiAddressClient struct { + client.MultiAddressClient + err error +} + +func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) { + if c.err != nil { + return nil, c.err + } + return &apiclient.ResObjectPutSingle{}, nil +} + +func (c multiAddressClient) ReportError(error) { +} + +func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error { + return nil +} + +func TestECWriter(t *testing.T) { + // Create container with policy EC 1.1 + cnr := container.Container{} + p1 := netmap.PlacementPolicy{} + p1.SetContainerBackupFactor(1) + x1 := netmap.ReplicaDescriptor{} + x1.SetECDataCount(1) + x1.SetECParityCount(1) + p1.AddReplicas(x1) + cnr.SetPlacementPolicy(p1) + cnr.SetAttribute("cnr", "cnr1") + + cid := cidtest.ID() + + // Create 4 nodes, 2 nodes for chunks, + // 2 nodes for the case when the first two will fail. + ns, _ := testNodeMatrix(t, []int{4}) + + data := make([]byte, 100) + _, _ = rand.Read(data) + ver := version.Current() + + var csum checksum.Checksum + csum.SetSHA256(sha256.Sum256(data)) + + var csumTZ checksum.Checksum + csumTZ.SetTillichZemor(tz.Sum(csum.Value())) + + obj := objectSDK.New() + obj.SetID(oidtest.ID()) + obj.SetOwnerID(usertest.ID()) + obj.SetContainerID(cid) + obj.SetVersion(&ver) + obj.SetPayload(data) + obj.SetPayloadSize(uint64(len(data))) + obj.SetPayloadChecksum(csum) + obj.SetPayloadHomomorphicHash(csumTZ) + + // Builder return nodes without sort by hrw + builder := &testPlacementBuilder{ + vectors: ns, + } + + ownerKey, err := keys.NewPrivateKey() + require.NoError(t, err) + nodeKey, err := keys.NewPrivateKey() + require.NoError(t, err) + + log, err := logger.NewLogger(logger.Prm{}) + require.NoError(t, err) + + var n nmKeys + ecw := ECWriter{ + Config: &Config{ + NetmapKeys: n, + Logger: log, + ClientConstructor: clientConstructor{vectors: ns}, + KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil), + }, + PlacementOpts: append( + []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)}, + placement.WithCopyNumbers(nil)), // copies number ignored for EC + Container: cnr, + Key: &ownerKey.PrivateKey, + Relay: nil, + ObjectMetaValid: true, + } + + err = ecw.WriteObject(context.Background(), obj) + require.NoError(t, err) +} + +func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { + mNodes := make([][]netmap.NodeInfo, len(dim)) + mAddr := make([][]string, len(dim)) + + for i := range dim { + ns := make([]netmap.NodeInfo, dim[i]) + as := make([]string, dim[i]) + + for j := range dim[i] { + a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", + strconv.Itoa(i), + strconv.Itoa(60000+j), + ) + + var ni netmap.NodeInfo + ni.SetNetworkEndpoints(a) + ni.SetPublicKey([]byte(a)) 
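A note on the test scaffolding: `placement.UseBuilder` accepts anything implementing `BuildPlacement`, so the test feeds the writer a fixed, deliberately unsorted node list plus deterministic client failures (the constructor above always errors for the first two nodes). The injection boils down to:

```go
builder := &testPlacementBuilder{vectors: ns} // fixed, non-HRW-sorted nodes
opts := []placement.Option{
	placement.UseBuilder(builder),
	placement.ForContainer(cnr),
	placement.WithCopyNumbers(nil), // copy numbers are ignored for EC
}
```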
+ + var na network.AddressGroup + + err := na.FromIterator(netmapcore.Node(ni)) + require.NoError(t, err) + + as[j] = network.StringifyGroup(na) + + ns[j] = ni + } + + mNodes[i] = ns + mAddr[i] = as + } + + return mNodes, mAddr +} diff --git a/pkg/services/object/put/local.go b/pkg/services/object/common/writer/local.go similarity index 68% rename from pkg/services/object/put/local.go rename to pkg/services/object/common/writer/local.go index 54649adc7..cf3d03275 100644 --- a/pkg/services/object/put/local.go +++ b/pkg/services/object/common/writer/local.go @@ -1,10 +1,12 @@ -package putsvc +package writer import ( "context" "fmt" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) @@ -13,7 +15,7 @@ import ( type ObjectStorage interface { // Put must save passed object // and return any appeared error. - Put(context.Context, *objectSDK.Object) error + Put(context.Context, *objectSDK.Object, bool) error // Delete must delete passed objects // and return any appeared error. Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error @@ -24,19 +26,24 @@ type ObjectStorage interface { IsLocked(context.Context, oid.Address) (bool, error) } -type localTarget struct { - storage ObjectStorage +type LocalTarget struct { + Storage ObjectStorage + Container containerSDK.Container } -func (t localTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error { +func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error { + if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil { + return fmt.Errorf("(%T) could not put object to local storage: %w", t, err) + } + switch meta.Type() { case objectSDK.TypeTombstone: - err := t.storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects()) + err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects()) if err != nil { return fmt.Errorf("could not delete objects from tombstone locally: %w", err) } case objectSDK.TypeLock: - err := t.storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects()) + err := t.Storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects()) if err != nil { return fmt.Errorf("could not lock object from lock objects locally: %w", err) } @@ -44,8 +51,5 @@ func (t localTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met // objects that do not change meta storage } - if err := t.storage.Put(ctx, obj); err != nil { - return fmt.Errorf("(%T) could not put object to local storage: %w", t, err) - } return nil } diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/common/writer/remote.go similarity index 92% rename from pkg/services/object/put/remote.go rename to pkg/services/object/common/writer/remote.go index ee8d64e7a..697613ff7 100644 --- a/pkg/services/object/put/remote.go +++ b/pkg/services/object/common/writer/remote.go @@ -1,4 +1,4 @@ -package putsvc +package writer import ( "context" @@ -16,7 +16,7 @@ import ( "google.golang.org/grpc/status" ) -type remoteTarget struct { +type remoteWriter struct { privateKey *ecdsa.PrivateKey commonPrm *util.CommonPrm @@ -41,7 +41,7 @@ type RemotePutPrm struct { obj *objectSDK.Object } -func (t *remoteTarget) WriteObject(ctx 
context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error { +func (t *remoteWriter) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error { c, err := t.clientConstructor.Get(t.nodeInfo) if err != nil { return fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err) @@ -64,7 +64,7 @@ func (t *remoteTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, _ return t.putStream(ctx, prm) } -func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error { +func (t *remoteWriter) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error { _, err := internalclient.PutObject(ctx, prm) if err != nil { return fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err) @@ -72,7 +72,7 @@ func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObje return nil } -func (t *remoteTarget) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error { +func (t *remoteWriter) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error { _, err := internalclient.PutObjectSingle(ctx, prm) if err != nil { return fmt.Errorf("(%T) could not put single object to %s: %w", t, t.nodeInfo.AddressGroup(), err) @@ -113,7 +113,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error { return err } - t := &remoteTarget{ + t := &remoteWriter{ privateKey: key, clientConstructor: s.clientConstructor, } diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go new file mode 100644 index 000000000..d3d2b41b4 --- /dev/null +++ b/pkg/services/object/common/writer/writer.go @@ -0,0 +1,168 @@ +package writer + +import ( + "context" + "crypto/ecdsa" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" + objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" +) + +type MaxSizeSource interface { + // MaxObjectSize returns maximum payload size + // of physically stored object in system. + // + // Must return 0 if value can not be obtained. + MaxObjectSize(context.Context) uint64 +} + +type ClientConstructor interface { + Get(client.NodeInfo) (client.MultiAddressClient, error) +} + +type InnerRing interface { + InnerRingKeys(ctx context.Context) ([][]byte, error) +} + +type FormatValidatorConfig interface { + VerifySessionTokenIssuer() bool +} + +// Config represents a set of static parameters that are established during +// the initialization phase of all services. 
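`MaxSizeSource.MaxObjectSize` signals failure in-band by returning 0, and the target constructors in target.go rely on that: both `newUntrustedTarget` and `newTrustedTarget` treat a zero limit as an error. The caller-side contract, condensed:

```go
maxPayloadSz := cfg.MaxSizeSrc.MaxObjectSize(ctx)
if maxPayloadSz == 0 {
	// 0 is the documented "value could not be obtained" marker,
	// never a legal size limit.
	return errors.New("could not obtain max object size parameter")
}
```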
+type Config struct { + KeyStorage *objutil.KeyStorage + + MaxSizeSrc MaxSizeSource + + LocalStore ObjectStorage + + ContainerSource container.Source + + NetmapSource netmap.Source + + NetmapKeys netmap.AnnouncedKeys + + FormatValidator *object.FormatValidator + + NetworkState netmap.State + + ClientConstructor ClientConstructor + + Logger *logger.Logger + + VerifySessionTokenIssuer bool +} + +type Option func(*Config) + +func WithLogger(l *logger.Logger) Option { + return func(c *Config) { + c.Logger = l + } +} + +func WithVerifySessionTokenIssuer(v bool) Option { + return func(c *Config) { + c.VerifySessionTokenIssuer = v + } +} + +type Params struct { + Config *Config + + Common *objutil.CommonPrm + + Header *objectSDK.Object + + Container containerSDK.Container + + TraverseOpts []placement.Option + + Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error + + SignRequestPrivateKey *ecdsa.PrivateKey +} + +func New(prm *Params) transformer.ObjectWriter { + if container.IsECContainer(prm.Container) && object.IsECSupported(prm.Header) { + return newECWriter(prm) + } + return newDefaultObjectWriter(prm, false) +} + +func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.ObjectWriter { + var relay func(context.Context, NodeDescriptor) error + if prm.Relay != nil { + relay = func(ctx context.Context, node NodeDescriptor) error { + var info client.NodeInfo + + client.NodeInfoFromNetmapElement(&info, node.Info) + + c, err := prm.Config.ClientConstructor.Get(info) + if err != nil { + return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) + } + + return prm.Relay(ctx, info, c) + } + } + + var resetSuccessAfterOnBroadcast bool + traverseOpts := prm.TraverseOpts + if forECPlacement && !prm.Common.LocalOnly() { + // save non-regular and linking object to EC container. + // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc. 
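The `append` that the comment above annotates follows on the next line; the arithmetic behind it: an EC n.k policy survives the loss of any k chunks, and a plain replica set needs k+1 full copies to survive the same k node failures, hence parity count plus one.

```go
parity := policy.ECParityCount(prm.Container.PlacementPolicy())
successAfter := uint32(parity + 1) // EC 2.1 -> 2 (REP 2); EC 2.2 -> 3 (REP 3)
```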
+ traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.Container.PlacementPolicy())+1))) + resetSuccessAfterOnBroadcast = true + } + + return &distributedWriter{ + cfg: prm.Config, + placementOpts: traverseOpts, + resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast, + nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget { + if node.Local { + return LocalTarget{ + Storage: prm.Config.LocalStore, + Container: prm.Container, + } + } + + rt := &remoteWriter{ + privateKey: prm.SignRequestPrivateKey, + commonPrm: prm.Common, + clientConstructor: prm.Config.ClientConstructor, + } + + client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.Info) + + return rt + }, + relay: relay, + } +} + +func newECWriter(prm *Params) transformer.ObjectWriter { + return &objectWriterDispatcher{ + ecWriter: &ECWriter{ + Config: prm.Config, + PlacementOpts: append(prm.TraverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC + Container: prm.Container, + Key: prm.SignRequestPrivateKey, + CommonPrm: prm.Common, + Relay: prm.Relay, + }, + repWriter: newDefaultObjectWriter(prm, true), + } +} diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go index 88454625d..57e33fde7 100644 --- a/pkg/services/object/delete/delete.go +++ b/pkg/services/object/delete/delete.go @@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) error { - exec.log.Debug(logs.ServingRequest) + exec.log.Debug(ctx, logs.ServingRequest) if err := exec.executeLocal(ctx); err != nil { - exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) return err } - exec.log.Debug(logs.OperationFinishedSuccessfully) + exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) return nil } diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go index c2f92950f..a99ba3586 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -4,12 +4,13 @@ import ( "context" "errors" "fmt" + "slices" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -34,13 +35,13 @@ type execCtx struct { } func (exec *execCtx) setLogger(l *logger.Logger) { - exec.log = &logger.Logger{Logger: l.With( + exec.log = l.With( zap.String("request", "DELETE"), zap.Stringer("address", exec.address()), zap.Bool("local", exec.isLocal()), zap.Bool("with session", exec.prm.common.SessionToken() != nil), zap.Bool("with bearer", exec.prm.common.BearerToken() != nil), - )} + ) } func (exec *execCtx) isLocal() bool { @@ -83,16 +84,16 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error { exec.splitInfo = errSplitInfo.SplitInfo() exec.tombstone.SetSplitID(exec.splitInfo.SplitID()) - exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers) + exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers) if err := exec.collectMembers(ctx); err != nil { 
return err } - exec.log.Debug(logs.DeleteMembersSuccessfullyCollected) + exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected) return nil case errors.As(err, &errECInfo): - exec.log.Debug(logs.DeleteECObjectReceived) + exec.log.Debug(ctx, logs.DeleteECObjectReceived) return nil } @@ -108,7 +109,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error { func (exec *execCtx) collectMembers(ctx context.Context) error { if exec.splitInfo == nil { - exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY) + exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY) return nil } @@ -131,7 +132,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error { func (exec *execCtx) collectChain(ctx context.Context) error { var chain []oid.ID - exec.log.Debug(logs.DeleteAssemblingChain) + exec.log.Debug(ctx, logs.DeleteAssemblingChain) for prev, withPrev := exec.splitInfo.LastPart(); withPrev; { chain = append(chain, prev) @@ -152,7 +153,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error { } func (exec *execCtx) collectChildren(ctx context.Context) error { - exec.log.Debug(logs.DeleteCollectingChildren) + exec.log.Debug(ctx, logs.DeleteCollectingChildren) children, err := exec.svc.header.children(ctx, exec) if err != nil { @@ -165,7 +166,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error { } func (exec *execCtx) supplementBySplitID(ctx context.Context) error { - exec.log.Debug(logs.DeleteSupplementBySplitID) + exec.log.Debug(ctx, logs.DeleteSupplementBySplitID) chain, err := exec.svc.searcher.splitMembers(ctx, exec) if err != nil { @@ -182,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) { for i := range members { for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body if members[i].Equals(incoming[j]) { - incoming = append(incoming[:j], incoming[j+1:]...) + incoming = slices.Delete(incoming, j, j+1) j-- } } diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go index 2c3c47f49..01b2d9b3f 100644 --- a/pkg/services/object/delete/local.go +++ b/pkg/services/object/delete/local.go @@ -10,13 +10,13 @@ import ( ) func (exec *execCtx) executeLocal(ctx context.Context) error { - exec.log.Debug(logs.DeleteFormingTombstoneStructure) + exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure) if err := exec.formTombstone(ctx); err != nil { return err } - exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving) + exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving) return exec.saveTombstone(ctx) } @@ -33,7 +33,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) error { ) exec.addMembers([]oid.ID{exec.address().Object()}) - exec.log.Debug(logs.DeleteFormingSplitInfo) + exec.log.Debug(ctx, logs.DeleteFormingSplitInfo) if err := exec.formExtendedInfo(ctx); err != nil { return fmt.Errorf("form extended info: %w", err) diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go index 0ba21eee3..1c4d7d585 100644 --- a/pkg/services/object/delete/service.go +++ b/pkg/services/object/delete/service.go @@ -27,11 +27,11 @@ type Option func(*cfg) type NetworkInfo interface { netmap.State - // Must return the lifespan of the tombstones + // TombstoneLifetime must return the lifespan of the tombstones // in the FrostFS epochs. TombstoneLifetime() (uint64, error) - // Returns user ID of the local storage node. Result must not be nil. + // LocalNodeID returns user ID of the local storage node. Result must not be nil. 
// New tombstone objects will have the result as an owner ID if removal is executed w/o a session. LocalNodeID() user.ID } @@ -72,7 +72,7 @@ func New(gs *getsvc.Service, opts ...Option, ) *Service { c := &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), header: &headSvcWrapper{s: gs}, searcher: &searchSvcWrapper{s: ss}, placer: &putSvcWrapper{s: ps}, @@ -92,6 +92,6 @@ func New(gs *getsvc.Service, // WithLogger returns option to specify Delete service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))} + c.log = l } } diff --git a/pkg/services/object/delete/v2/service.go b/pkg/services/object/delete/v2/service.go index 10dcd0e87..7146f0361 100644 --- a/pkg/services/object/delete/v2/service.go +++ b/pkg/services/object/delete/v2/service.go @@ -3,8 +3,8 @@ package deletesvc import ( "context" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // Service implements Delete operation of Object service v2. diff --git a/pkg/services/object/delete/v2/util.go b/pkg/services/object/delete/v2/util.go index d0db1f543..c57d4562a 100644 --- a/pkg/services/object/delete/v2/util.go +++ b/pkg/services/object/delete/v2/util.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index ba6fddec5..e80132489 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -12,8 +12,8 @@ import ( ) func (r *request) assemble(ctx context.Context) { - if !r.canAssemble() { - r.log.Debug(logs.GetCanNotAssembleTheObject) + if !r.canAssembleComplexObject() { + r.log.Debug(ctx, logs.GetCanNotAssembleTheObject) return } @@ -35,23 +35,23 @@ func (r *request) assemble(ctx context.Context) { // `execCtx` so it should be disabled there. 
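// (the assembler created below fetches child objects through this same
// request, so its sub-requests must run with forwarding disabled)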
r.disableForwarding() - r.log.Debug(logs.GetTryingToAssembleTheObject) + r.log.Debug(ctx, logs.GetTryingToAssembleTheObject) r.prm.common = r.prm.common.WithLocalOnly(false) - assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r) + assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly()) - r.log.Debug(logs.GetAssemblingSplittedObject, + r.log.Debug(ctx, logs.GetAssemblingSplittedObject, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) - defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted, + defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) obj, err := assembler.Assemble(ctx, r.prm.objWriter) if err != nil { - r.log.Warn(logs.GetFailedToAssembleSplittedObject, + r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject, zap.Error(err), zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), @@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque detachedExecutor.execute(ctx) - return detachedExecutor.statusError.err + return detachedExecutor.err } diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go index 7bbd9ca1e..59dd7fd93 100644 --- a/pkg/services/object/get/assembleec.go +++ b/pkg/services/object/get/assembleec.go @@ -11,8 +11,8 @@ import ( ) func (r *request) assembleEC(ctx context.Context) { - if r.isRaw() && r.isLocal() { - r.log.Debug(logs.GetCanNotAssembleTheObject) + if r.isRaw() { + r.log.Debug(ctx, logs.GetCanNotAssembleTheObject) return } @@ -34,29 +34,29 @@ func (r *request) assembleEC(ctx context.Context) { // `execCtx` so it should be disabled there. 
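// (same as in assemble(): EC chunk retrieval below spawns sub-requests
// that must not inherit the original request's forwarder)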
r.disableForwarding() - r.log.Debug(logs.GetTryingToAssembleTheECObject) + r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject) // initialize epoch number - ok := r.initEpoch() + ok := r.initEpoch(ctx) if !ok { return } r.prm.common = r.prm.common.WithLocalOnly(false) - assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.isRaw(), r.traverserGenerator, r.curProcEpoch) + assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch) - r.log.Debug(logs.GetAssemblingECObject, + r.log.Debug(ctx, logs.GetAssemblingECObject, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) - defer r.log.Debug(logs.GetAssemblingECObjectCompleted, + defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) obj, err := assembler.Assemble(ctx, r.prm.objWriter) if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) { - r.log.Warn(logs.GetFailedToAssembleECObject, + r.log.Warn(ctx, logs.GetFailedToAssembleECObject, zap.Error(err), zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go index 025296ec7..b24c9417b 100644 --- a/pkg/services/object/get/assembler.go +++ b/pkg/services/object/get/assembler.go @@ -2,6 +2,7 @@ package getsvc import ( "context" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -19,6 +20,7 @@ type assembler struct { splitInfo *objectSDK.SplitInfo rng *objectSDK.Range objGetter objectGetter + head bool currentOffset uint64 @@ -30,18 +32,23 @@ func newAssembler( splitInfo *objectSDK.SplitInfo, rng *objectSDK.Range, objGetter objectGetter, + head bool, ) *assembler { return &assembler{ addr: addr, rng: rng, splitInfo: splitInfo, objGetter: objGetter, + head: head, } } // Assemble assembles a split large object and writes its content to ObjectWriter. // It returns the parent object.
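// Head-only requests are served by assembleHeader (assembler_head.go) and
// ranged payload requests by the *Range helpers (assembler_range.go).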
func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { + if a.head { + return a.assembleHeader(ctx, writer) + } sourceObjectID, ok := a.getLastPartOrLinkObjectID() if !ok { return nil, objectSDK.NewSplitInfoError(a.splitInfo) @@ -53,15 +60,23 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS if previousID == nil && len(childrenIDs) == 0 { return nil, objectSDK.NewSplitInfoError(a.splitInfo) } + if len(childrenIDs) > 0 { - if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil { - return nil, err + if a.rng != nil { + err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer) + } else { + err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer) } } else { - if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil { - return nil, err + if a.rng != nil { + err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer) + } else { + err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer) } } + if err != nil { + return nil, err + } return a.parentObject, nil } @@ -147,26 +162,16 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD } func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { - if a.rng == nil { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } - return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true) - } - - if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { + if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { return err } - return writer.WriteChunk(ctx, a.parentObject.Payload()) + return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true) } func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { - if a.rng == nil { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } + if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { + return err } - if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil { return err } @@ -176,16 +181,9 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev return nil } -func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error { - withRng := len(partRanges) > 0 && a.rng != nil - +func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error { for i := range partIDs { - var r *objectSDK.Range - if withRng { - r = &partRanges[i] - } - - _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer) + _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer) if err != nil { return err } @@ -194,22 +192,13 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec } func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { - chain, rngs, err := a.buildChain(ctx, prevID) + chain, err := a.buildChain(ctx, prevID) if err != nil { return err } - reverseRngs := len(rngs) > 0 - - for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 { - chain[left], chain[right] = chain[right], chain[left] - - if 
reverseRngs { - rngs[left], rngs[right] = rngs[right], rngs[left] - } - } - - return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false) + slices.Reverse(chain) + return a.assemblePayloadByObjectIDs(ctx, writer, chain, false) } func (a *assembler) isChild(obj *objectSDK.Object) bool { @@ -217,63 +206,28 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool { return parent == nil || equalAddresses(a.addr, object.AddressOf(parent)) } -func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { +func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) { var ( chain []oid.ID - rngs []objectSDK.Range - from = a.rng.GetOffset() - to = from + a.rng.GetLength() hasPrev = true ) // fill the chain end-to-start for hasPrev { - // check that only for "range" requests, - // for `GET` it stops via the false `withPrev` - if a.rng != nil && a.currentOffset <= from { - break - } - head, err := a.objGetter.HeadObject(ctx, prevID) if err != nil { - return nil, nil, err + return nil, err } if !a.isChild(head) { - return nil, nil, errParentAddressDiffers + return nil, errParentAddressDiffers } - if a.rng != nil { - sz := head.PayloadSize() - - a.currentOffset -= sz - - if a.currentOffset < to { - off := uint64(0) - if from > a.currentOffset { - off = from - a.currentOffset - sz -= from - a.currentOffset - } - - if to < a.currentOffset+off+sz { - sz = to - off - a.currentOffset - } - - index := len(rngs) - rngs = append(rngs, objectSDK.Range{}) - rngs[index].SetOffset(off) - rngs[index].SetLength(sz) - - id, _ := head.ID() - chain = append(chain, id) - } - } else { - id, _ := head.ID() - chain = append(chain, id) - } + id, _ := head.ID() + chain = append(chain, id) prevID, hasPrev = head.PreviousID() } - return chain, rngs, nil + return chain, nil } diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go new file mode 100644 index 000000000..ff213cb82 --- /dev/null +++ b/pkg/services/object/get/assembler_head.go @@ -0,0 +1,45 @@ +package getsvc + +import ( + "context" + + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { + var sourceObjectIDs []oid.ID + sourceObjectID, ok := a.splitInfo.Link() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + sourceObjectID, ok = a.splitInfo.LastPart() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + if len(sourceObjectIDs) == 0 { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + for _, sourceObjectID = range sourceObjectIDs { + obj, err := a.getParent(ctx, sourceObjectID, writer) + if err == nil { + return obj, nil + } + } + return nil, objectSDK.NewSplitInfoError(a.splitInfo) +} + +func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { + obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) + if err != nil { + return nil, err + } + parent := obj.Parent() + if parent == nil { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + if err := writer.WriteHeader(ctx, parent); err != nil { + return nil, err + } + return obj, nil +} diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go new file mode 100644 index 000000000..780693c40 --- /dev/null +++ 
b/pkg/services/object/get/assembler_range.go @@ -0,0 +1,87 @@ +package getsvc + +import ( + "context" + "slices" + + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { + if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { + return err + } + return writer.WriteChunk(ctx, a.parentObject.Payload()) +} + +func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { + if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil { + return err + } + if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part + return err + } + return nil +} + +func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error { + for i := range partIDs { + _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer) + if err != nil { + return err + } + } + return nil +} + +func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { + chain, rngs, err := a.buildChainRange(ctx, prevID) + if err != nil { + return err + } + + slices.Reverse(chain) + slices.Reverse(rngs) + return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs) +} + +func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { + var ( + chain []oid.ID + rngs []objectSDK.Range + from = a.rng.GetOffset() + to = from + a.rng.GetLength() + + hasPrev = true + ) + + // fill the chain end-to-start + for hasPrev && from < a.currentOffset { + head, err := a.objGetter.HeadObject(ctx, prevID) + if err != nil { + return nil, nil, err + } + if !a.isChild(head) { + return nil, nil, errParentAddressDiffers + } + + nextOffset := a.currentOffset - head.PayloadSize() + clampedFrom := max(from, nextOffset) + clampedTo := min(to, a.currentOffset) + if clampedFrom < clampedTo { + index := len(rngs) + rngs = append(rngs, objectSDK.Range{}) + rngs[index].SetOffset(clampedFrom - nextOffset) + rngs[index].SetLength(clampedTo - clampedFrom) + + id, _ := head.ID() + chain = append(chain, id) + } + + a.currentOffset = nextOffset + prevID, hasPrev = head.PreviousID() + } + + return chain, rngs, nil +} diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index a4021ee5e..e0a7e1da6 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -34,10 +34,8 @@ type assemblerec struct { rng *objectSDK.Range remoteStorage ecRemoteStorage localStorage localStorage - cs container.Source log *logger.Logger head bool - raw bool traverserGenerator traverserGenerator epoch uint64 } @@ -48,10 +46,8 @@ func newAssemblerEC( rng *objectSDK.Range, remoteStorage ecRemoteStorage, localStorage localStorage, - cs container.Source, log *logger.Logger, head bool, - raw bool, tg traverserGenerator, epoch uint64, ) *assemblerec { @@ -61,10 +57,8 @@ func newAssemblerEC( ecInfo: ecInfo, remoteStorage: remoteStorage, localStorage: localStorage, - cs: cs, log: log, head: head, - raw: raw, traverserGenerator: tg, epoch: epoch, } @@ -74,9 +68,6 @@ func newAssemblerEC( // It returns parent object. 
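// Head-only requests reconstruct just the parent header; otherwise the full
// payload (or the requested range) is rebuilt from EC chunks collected from
// container nodes.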
func (a *assemblerec) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { switch { - case a.raw: - err := a.reconstructRawError(ctx) - return nil, err case a.head: return a.reconstructHeader(ctx, writer) case a.rng != nil: @@ -134,7 +125,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) { objID := a.addr.Object() - trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch) + trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch) if err != nil { return nil, err } @@ -149,56 +140,6 @@ func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bo return c.Reconstruct(parts) } -func (a *assemblerec) reconstructRawError(ctx context.Context) error { - chunks := make(map[string]objectSDK.ECChunk) - var chunksGuard sync.Mutex - for _, ch := range a.ecInfo.localChunks { - chunks[string(ch.ID.GetValue())] = ch - } - - objID := a.addr.Object() - trav, _, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch) - if err != nil { - return err - } - - eg, ctx := errgroup.WithContext(ctx) - for { - batch := trav.Next() - if len(batch) == 0 { - break - } - for _, node := range batch { - var info client.NodeInfo - client.NodeInfoFromNetmapElement(&info, node) - eg.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if _, found := a.ecInfo.remoteChunks[string(info.PublicKey())]; found { - return nil - } - - nodeChunks := a.tryGetChunkListFromNode(ctx, info) - - chunksGuard.Lock() - defer chunksGuard.Unlock() - for _, ch := range nodeChunks { - chunks[string(ch.ID.GetValue())] = ch - } - return nil - }) - } - } - if err = eg.Wait(); err != nil { - return err - } - return createECInfoErr(chunks) -} - func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Traverser, cnr *container.Container) []*objectSDK.Object { dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy()) parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy()) @@ -214,7 +155,7 @@ func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Travers parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount) if err != nil { - a.log.Debug(logs.GetUnableToGetAllPartsECObject, zap.Error(err)) + a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err)) } return parts } @@ -226,7 +167,6 @@ func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placem eg.SetLimit(dataCount) for _, ch := range a.ecInfo.localChunks { - ch := ch eg.Go(func() error { select { case <-ctx.Done(): @@ -289,24 +229,22 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object var objID oid.ID err := objID.ReadFromV2(ch.ID) if err != nil { - a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) + a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) return nil } var addr oid.Address - addr.SetContainer(addr.Container()) + addr.SetContainer(a.addr.Container()) addr.SetObject(objID) var object *objectSDK.Object if a.head { object, err = a.localStorage.Head(ctx, addr, false) - if err != nil { - 
a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - return nil + if err != nil && !errors.Is(err, context.Canceled) { + a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) } } else { object, err = a.localStorage.Get(ctx, addr) - if err != nil { - a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - return nil + if err != nil && !errors.Is(err, context.Canceled) { + a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) } } return object @@ -319,11 +257,11 @@ func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.N var errECInfo *objectSDK.ECInfoError _, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true) if err == nil { - a.log.Error(logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey()))) + a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey()))) return nil } if !errors.As(err, &errECInfo) { - a.log.Warn(logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) + a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) return nil } result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks)) @@ -337,7 +275,7 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli var objID oid.ID err := objID.ReadFromV2(ch.ID) if err != nil { - a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) + a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) return nil } var addr oid.Address @@ -346,24 +284,14 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli var object *objectSDK.Object if a.head { object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false) - if err != nil { - a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - return nil + if err != nil && !errors.Is(err, context.Canceled) { + a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) } } else { object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node) - if err != nil { - a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - return nil + if err != nil && !errors.Is(err, context.Canceled) { + a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) } } return object } - -func createECInfoErr(chunks map[string]objectSDK.ECChunk) *objectSDK.ECInfoError { - info := objectSDK.NewECInfo() - for _, ch := range chunks { - info.AddChunk(ch) - } - return objectSDK.NewECInfoError(info) -} diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index 
d22b14192..dfb31133c 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -10,32 +10,25 @@ import ( func (r *request) executeOnContainer(ctx context.Context) { if r.isLocal() { - r.log.Debug(logs.GetReturnResultDirectly) + r.log.Debug(ctx, logs.GetReturnResultDirectly) return } lookupDepth := r.netmapLookupDepth() - r.log.Debug(logs.TryingToExecuteInContainer, + r.log.Debug(ctx, logs.TryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) // initialize epoch number - ok := r.initEpoch() + ok := r.initEpoch(ctx) if !ok { return } - for { - if r.processCurrentEpoch(ctx) { - break - } - - // check the maximum depth has been reached - if lookupDepth == 0 { - break - } + localStatus := r.status + for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 { lookupDepth-- // go to the previous epoch @@ -43,12 +36,12 @@ func (r *request) executeOnContainer(ctx context.Context) { } } -func (r *request) processCurrentEpoch(ctx context.Context) bool { - r.log.Debug(logs.ProcessEpoch, +func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool { + r.log.Debug(ctx, logs.ProcessEpoch, zap.Uint64("number", r.curProcEpoch), ) - traverser, ok := r.generateTraverser(r.address()) + traverser, ok := r.generateTraverser(ctx, r.address()) if !ok { return true } @@ -56,12 +49,16 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool { ctx, cancel := context.WithCancel(ctx) defer cancel() - r.status = statusUndefined + if localStatus == statusEC { // possible only for raw == true and local == false + r.status = statusEC + } else { + r.status = statusUndefined + } for { addrs := traverser.Next() if len(addrs) == 0 { - r.log.Debug(logs.NoMoreNodesAbortPlacementIteration) + r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration) return false } @@ -69,7 +66,7 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool { for i := range addrs { select { case <-ctx.Done(): - r.log.Debug(logs.InterruptPlacementIterationByContext, + r.log.Debug(ctx, logs.InterruptPlacementIterationByContext, zap.Error(ctx.Err()), ) @@ -85,7 +82,7 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool { client.NodeInfoFromNetmapElement(&info, addrs[i]) if r.processNode(ctx, info) { - r.log.Debug(logs.GetCompletingTheOperation) + r.log.Debug(ctx, logs.GetCompletingTheOperation) return true } } diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 5a57bc56e..3a50308c2 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -2,9 +2,11 @@ package getsvc import ( "context" + "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" ) @@ -85,41 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error { exec.execute(ctx) - return exec.statusError.err + return exec.err } -func (exec *request) execute(ctx context.Context) { - exec.log.Debug(logs.ServingRequest) +func (r *request) execute(ctx context.Context) { + r.log.Debug(ctx, logs.ServingRequest) // perform local operation - exec.executeLocal(ctx) + r.executeLocal(ctx) - exec.analyzeStatus(ctx, true) + r.analyzeStatus(ctx, true) } -func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) { +func (r *request) analyzeStatus(ctx context.Context, execCnr bool) { // 
analyze local result - switch exec.status { + switch r.status { case statusOK: - exec.log.Debug(logs.OperationFinishedSuccessfully) + r.log.Debug(ctx, logs.OperationFinishedSuccessfully) case statusINHUMED: - exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved) + r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - exec.log.Debug(logs.GetRequestedObjectIsVirtual) - exec.assemble(ctx) + r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) + r.assemble(ctx) case statusOutOfRange: - exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds) + r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) case statusEC: - exec.log.Debug(logs.GetRequestedObjectIsEC) - exec.assembleEC(ctx) + r.log.Debug(ctx, logs.GetRequestedObjectIsEC) + if r.isRaw() && execCnr { + r.executeOnContainer(ctx) + r.analyzeStatus(ctx, false) + } + r.assembleEC(ctx) default: - exec.log.Debug(logs.OperationFinishedWithError, - zap.Error(exec.err), + r.log.Debug(ctx, logs.OperationFinishedWithError, + zap.Error(r.err), ) + var errAccessDenied *apistatus.ObjectAccessDenied + if execCnr && errors.As(r.err, &errAccessDenied) { + // Local get can't return access denied error, so this error was returned by + // write to the output stream. So there is no need to try to find object on other nodes. + return + } if execCnr { - exec.executeOnContainer(ctx) - exec.analyzeStatus(ctx, false) + r.executeOnContainer(ctx) + r.analyzeStatus(ctx, false) } } } diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go index 29a15ba78..3efc72065 100644 --- a/pkg/services/object/get/get_test.go +++ b/pkg/services/object/get/get_test.go @@ -63,7 +63,7 @@ type testClient struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch() (uint64, error) { +func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { return uint64(e), nil } @@ -79,7 +79,7 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { +func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { opts := make([]placement.Option, 0, 4) opts = append(opts, placement.ForContainer(g.c), @@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e ui opts = append(opts, placement.ForObject(*obj)) } - t, err := placement.NewTraverser(opts...) + t, err := placement.NewTraverser(context.Background(), opts...) 
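// the test generator ignores the passed ctx: a background context is
// sufficient to build placement in tests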
return t, &containerCore.Container{ Value: g.c, }, err } -func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var addr oid.Address addr.SetContainer(cnr) @@ -470,7 +470,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { ns := make([]netmap.NodeInfo, dim[i]) as := make([]string, dim[i]) - for j := 0; j < dim[i]; j++ { + for j := range dim[i] { a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", strconv.Itoa(i), strconv.Itoa(60000+j), @@ -508,7 +508,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) { ids := make([]oid.ID, 0, ln) payload := make([]byte, 0, ln*10) - for i := 0; i < ln; i++ { + for i := range ln { ids = append(ids, curID) addr.SetObject(curID) @@ -730,7 +730,7 @@ func TestGetRemoteSmall(t *testing.T) { t.Run("VIRTUAL", func(t *testing.T) { testHeadVirtual := func(svc *Service, addr oid.Address, i *objectSDK.SplitInfo) { - headPrm := newHeadPrm(false, nil) + headPrm := newHeadPrm(true, nil) headPrm.WithAddress(addr) errSplit := objectSDK.NewSplitInfoError(objectSDK.NewSplitInfo()) @@ -1750,7 +1750,7 @@ func TestGetRange(t *testing.T) { }, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), payload) @@ -1811,7 +1811,7 @@ func TestGetRange(t *testing.T) { }, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), payload) diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go index b8497d7d1..83ef54744 100644 --- a/pkg/services/object/get/getrangeec_test.go +++ b/pkg/services/object/get/getrangeec_test.go @@ -6,12 +6,12 @@ import ( "fmt" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -28,14 +28,14 @@ type containerStorage struct { cnt *container.Container } -func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) { +func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) { coreCnt := coreContainer.Container{ Value: *cs.cnt, } return &coreCnt, nil } -func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { +func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { return nil, nil } @@ -131,7 +131,7 @@ func TestGetRangeEC(t *testing.T) { clients: clients, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; 
to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload()) diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go index 1cd5e549c..cfabb082f 100644 --- a/pkg/services/object/get/local.go +++ b/pkg/services/object/get/local.go @@ -31,7 +31,7 @@ func (r *request) executeLocal(ctx context.Context) { r.status = statusUndefined r.err = err - r.log.Debug(logs.GetLocalGetFailed, zap.Error(err)) + r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err)) case err == nil: r.status = statusOK r.err = nil diff --git a/pkg/services/object/get/prm.go b/pkg/services/object/get/prm.go index cbdb7a3e2..94c07381c 100644 --- a/pkg/services/object/get/prm.go +++ b/pkg/services/object/get/prm.go @@ -124,6 +124,10 @@ func (p *commonPrm) SetRequestForwarder(f RequestForwarder) { p.forwarder = f } +func (p *commonPrm) SetSignerKey(signerKey *ecdsa.PrivateKey) { + p.signerKey = signerKey +} + // WithAddress sets object address to be read. func (p *commonPrm) WithAddress(addr oid.Address) { p.addr = addr diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go index 4dee15242..78ca5b5e3 100644 --- a/pkg/services/object/get/remote.go +++ b/pkg/services/object/get/remote.go @@ -18,9 +18,9 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode") defer span.End() - r.log.Debug(logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey()))) + r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey()))) - rs, ok := r.getRemoteStorage(info) + rs, ok := r.getRemoteStorage(ctx, info) if !ok { return true } @@ -31,12 +31,21 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { var errECInfo *objectSDK.ECInfoError var errRemoved *apistatus.ObjectAlreadyRemoved var errOutOfRange *apistatus.ObjectOutOfRange + var errAccessDenied *apistatus.ObjectAccessDenied switch { default: - r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err)) - r.status = statusUndefined - r.err = new(apistatus.ObjectNotFound) + r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err)) + if r.status != statusEC { + // for raw requests, continue to collect other parts + r.status = statusUndefined + if errors.As(err, &errAccessDenied) { + r.err = err + } else if r.err == nil || !errors.As(r.err, &errAccessDenied) { + r.err = new(apistatus.ObjectNotFound) + } + } + return false case err == nil: r.status = statusOK r.err = nil @@ -48,22 +57,28 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { r.collectedObject = obj r.writeCollectedObject(ctx) } + return true case errors.As(err, &errRemoved): r.status = statusINHUMED r.err = errRemoved + return true case errors.As(err, &errOutOfRange): r.status = statusOutOfRange r.err = errOutOfRange + return true case errors.As(err, &errSplitInfo): r.status = statusVIRTUAL mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo()) r.err = objectSDK.NewSplitInfoError(r.infoSplit) + return true case errors.As(err, &errECInfo): r.status = statusEC r.err = r.infoEC.addRemote(string(info.PublicKey()), errECInfo.ECInfo()) + if r.isRaw() { + return false // continue to collect all parts + } + return true } - - return r.status != statusUndefined } func (r *request) getRemote(ctx context.Context, rs remoteStorage, info client.NodeInfo) (*objectSDK.Object, error) { diff --git 
a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go index 0df67dec9..2c64244cf 100644 --- a/pkg/services/object/get/remote_getter.go +++ b/pkg/services/object/get/remote_getter.go @@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob if err != nil { return nil, err } - epoch, err := g.es.Epoch() + epoch, err := g.es.Epoch(ctx) if err != nil { return nil, err } diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go index 9ddfeddf2..268080486 100644 --- a/pkg/services/object/get/request.go +++ b/pkg/services/object/get/request.go @@ -47,14 +47,14 @@ func (r *request) setLogger(l *logger.Logger) { req = "GET_RANGE" } - r.log = &logger.Logger{Logger: l.With( + r.log = l.With( zap.String("request", req), zap.Stringer("address", r.address()), zap.Bool("raw", r.isRaw()), zap.Bool("local", r.isLocal()), zap.Bool("with session", r.prm.common.SessionToken() != nil), zap.Bool("with bearer", r.prm.common.BearerToken() != nil), - )} + ) } func (r *request) isLocal() bool { @@ -88,8 +88,8 @@ func (r *request) key() (*ecdsa.PrivateKey, error) { return r.keyStore.GetKey(sessionInfo) } -func (r *request) canAssemble() bool { - return !r.isRaw() && !r.headOnly() +func (r *request) canAssembleComplexObject() bool { + return !r.isRaw() } func (r *request) splitInfo() *objectSDK.SplitInfo { @@ -116,20 +116,20 @@ func (r *request) netmapLookupDepth() uint64 { return r.prm.common.NetmapLookupDepth() } -func (r *request) initEpoch() bool { +func (r *request) initEpoch(ctx context.Context) bool { r.curProcEpoch = r.netmapEpoch() if r.curProcEpoch > 0 { return true } - e, err := r.epochSource.Epoch() + e, err := r.epochSource.Epoch(ctx) switch { default: r.status = statusUndefined r.err = err - r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) + r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) return false case err == nil: @@ -138,17 +138,17 @@ func (r *request) initEpoch() bool { } } -func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) { +func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) { obj := addr.Object() - t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch) + t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch) switch { default: r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) + r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) return nil, false case err == nil: @@ -156,13 +156,13 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo } } -func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) { +func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) { rs, err := r.remoteStorageConstructor.Get(info) if err != nil { r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient) + r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient) return nil, false } @@ -185,7 +185,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool { r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err)) + r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err)) case err == nil: r.status = statusOK r.err = nil @@ -206,7 +206,7 @@ func (r 
*request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object) r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err)) + r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err)) case err == nil: r.status = statusOK r.err = nil diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go index 3413abeb7..a103f5a7f 100644 --- a/pkg/services/object/get/service.go +++ b/pkg/services/object/get/service.go @@ -34,7 +34,7 @@ func New( result := &Service{ keyStore: ks, epochSource: es, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), localStorage: &engineLocalStorage{ engine: e, }, @@ -53,6 +53,6 @@ func New( // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(s *Service) { - s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))} + s.log = l } } diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go index 9669afdba..664366d1b 100644 --- a/pkg/services/object/get/types.go +++ b/pkg/services/object/get/types.go @@ -20,11 +20,11 @@ import ( ) type epochSource interface { - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } type traverserGenerator interface { - GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } type keyStorage interface { diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go index 213455e10..aaa09b891 100644 --- a/pkg/services/object/get/v2/errors.go +++ b/pkg/services/object/get/v2/errors.go @@ -4,8 +4,8 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refs "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" ) var ( diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go index 774f98643..60fcd7fbf 100644 --- a/pkg/services/object/get/v2/get_forwarder.go +++ b/pkg/services/object/get/v2/get_forwarder.go @@ -7,28 +7,30 @@ import ( "io" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) type getRequestForwarder struct { - OnceResign sync.Once - 
OnceHeaderSending sync.Once - GlobalProgress int - Key *ecdsa.PrivateKey - Request *objectV2.GetRequest - Stream *streamObjectWriter + OnceResign sync.Once + GlobalProgress int + Key *ecdsa.PrivateKey + Request *objectV2.GetRequest + Stream *streamObjectWriter + + headerSent bool + headerSentGuard sync.Mutex } func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) { @@ -83,13 +85,15 @@ func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetOb obj.SetSignature(v.GetSignature()) obj.SetHeader(v.GetHeader()) - var err error - f.OnceHeaderSending.Do(func() { - err = f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj)) - }) - if err != nil { + f.headerSentGuard.Lock() + defer f.headerSentGuard.Unlock() + if f.headerSent { + return nil + } + if err := f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj)); err != nil { return errCouldNotWriteObjHeader(err) } + f.headerSent = true return nil } diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go index 10ecfc4a3..a44616fc9 100644 --- a/pkg/services/object/get/v2/get_range_forwarder.go +++ b/pkg/services/object/get/v2/get_range_forwarder.go @@ -7,15 +7,15 @@ import ( "io" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go index e97b60f66..308ccd512 100644 --- a/pkg/services/object/get/v2/get_range_hash.go +++ b/pkg/services/object/get/v2/get_range_hash.go @@ -5,15 +5,15 @@ import ( "encoding/hex" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -22,7 +22,7 @@ import ( // GetRangeHash calls internal service and returns v2 response. func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - forward, err := s.needToForwardGetRangeHashRequest(req) + forward, err := s.needToForwardGetRangeHashRequest(ctx, req) if err != nil { return nil, err } @@ -48,7 +48,7 @@ type getRangeForwardParams struct { address oid.Address } -func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { +func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { if req.GetMetaHeader().GetTTL() <= 1 { return getRangeForwardParams{}, nil } @@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq } result.address = addr - cont, err := s.contSource.Get(addr.Container()) + cont, err := s.contSource.Get(ctx, addr.Container()) if err != nil { return result, fmt.Errorf("(%T) could not get container: %w", s, err) } - epoch, err := s.netmapSource.Epoch() + epoch, err := s.netmapSource.Epoch(ctx) if err != nil { return result, fmt.Errorf("(%T) could not get epoch: %w", s, err) } - nm, err := s.netmapSource.GetNetMapByEpoch(epoch) + nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch) if err != nil { return result, fmt.Errorf("(%T) could not get netmap: %w", s, err) } @@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq builder := placement.NewNetworkMapBuilder(nm) objectID := addr.Object() - nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy()) + nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy()) if err != nil { return result, fmt.Errorf("(%T) could not build object placement: %w", s, err) } @@ -125,14 +125,14 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2. var addrGr network.AddressGroup if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil { - s.log.Warn(logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) + s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) continue } var extAddr network.AddressGroup if len(node.ExternalAddresses()) > 0 { if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil { - s.log.Warn(logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) + s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) continue } } @@ -150,12 +150,12 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2. 
if firstErr == nil { firstErr = err } - s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromNode, + s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())), zap.Stringer("address", params.address), zap.Error(err)) } - s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr)) + s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr)) if firstErr != nil { return nil, firstErr } diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go index 5e16008b8..56056398d 100644 --- a/pkg/services/object/get/v2/head_forwarder.go +++ b/pkg/services/object/get/v2/head_forwarder.go @@ -5,15 +5,15 @@ import ( "crypto/ecdsa" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go index edd19b441..0ec8912fd 100644 --- a/pkg/services/object/get/v2/service.go +++ b/pkg/services/object/get/v2/service.go @@ -4,7 +4,6 @@ import ( "context" "errors" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -12,6 +11,7 @@ import ( getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" ) @@ -60,7 +60,7 @@ func NewService(svc *getsvc.Service, netmapSource: netmapSource, announcedKeys: announcedKeys, contSource: contSource, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), } for i := range opts { @@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get V2 service"))} + c.log = l } } diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go index 
ce9a5c767..0d73bcd4d 100644 --- a/pkg/services/object/get/v2/streamer.go +++ b/pkg/services/object/get/v2/streamer.go @@ -3,8 +3,8 @@ package getsvc import ( "context" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" ) @@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec p.SetHeader(objV2.GetHeader()) p.SetSignature(objV2.GetSignature()) - return s.GetObjectStream.Send(newResponse(p)) + return s.Send(newResponse(p)) } func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error { p := new(objectV2.GetObjectPartChunk) p.SetChunk(chunk) - return s.GetObjectStream.Send(newResponse(p)) + return s.Send(newResponse(p)) } func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { @@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { } func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error { - return s.GetObjectRangeStream.Send(newRangeResponse(chunk)) + return s.Send(newRangeResponse(chunk)) } func newRangeResponse(p []byte) *objectV2.GetRangeResponse { diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go index 852c2aec3..e699a3779 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -3,19 +3,20 @@ package getsvc import ( "context" "crypto/sha256" + "errors" "hash" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/status" clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -182,9 +183,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran default: return nil, errUnknownChechsumType(t) case refs.SHA256: - p.SetHashGenerator(func() hash.Hash { - return sha256.New() - }) + p.SetHashGenerator(sha256.New) case refs.TillichZemor: p.SetHashGenerator(func() hash.Hash { return tz.New() @@ -360,19 +359,20 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { var err error - - defer func() { - stop = err == nil - - if stop || firstErr == nil { - firstErr = err - } - - // would be nice to log otherwise - }() - res, err = f(ctx, addr, c, key) + // 
non-status logic error that could be returned + // from the SDK client; should not be considered + // as a connection error + var siErr *objectSDK.SplitInfoError + var eiErr *objectSDK.ECInfoError + + stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr) + + if stop || firstErr == nil { + firstErr = err + } + return }) diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go index 2c405070d..3e8832640 100644 --- a/pkg/services/object/internal/client/client.go +++ b/pkg/services/object/internal/client/client.go @@ -7,9 +7,11 @@ import ( "errors" "fmt" "io" + "strconv" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -31,6 +33,8 @@ type commonPrm struct { local bool xHeaders []string + + netmapEpoch uint64 } // SetClient sets base client for ForstFS API communication. @@ -73,6 +77,14 @@ func (x *commonPrm) SetXHeaders(hs []string) { x.xHeaders = hs } +func (x *commonPrm) calculateXHeaders() []string { + hs := x.xHeaders + if x.netmapEpoch != 0 { + hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10)) + } + return hs +} + type readPrmCommon struct { commonPrm } @@ -80,8 +92,8 @@ type readPrmCommon struct { // SetNetmapEpoch sets the epoch number to be used to locate the objectSDK. // // By default current epoch on the server will be used. -func (x *readPrmCommon) SetNetmapEpoch(_ uint64) { - // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465 +func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) { + x.netmapEpoch = epoch } // GetObjectPrm groups parameters of GetObject operation. 
@@ -139,7 +151,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) { prm.ClientParams.Session = prm.tokenSession } - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local prm.ClientParams.Key = prm.key @@ -233,7 +245,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams) if err == nil { @@ -326,7 +338,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e prm.ClientParams.Session = prm.tokenSession } - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local prm.ClientParams.Length = prm.ln @@ -390,7 +402,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) { defer span.End() prmCli := client.PrmObjectPutInit{ - XHeaders: prm.xHeaders, + XHeaders: prm.calculateXHeaders(), BearerToken: prm.tokenBearer, Session: prm.tokenSession, Local: true, @@ -437,7 +449,7 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro } prmCli := client.PrmObjectPutSingle{ - XHeaders: prm.xHeaders, + XHeaders: prm.calculateXHeaders(), BearerToken: prm.tokenBearer, Session: prm.tokenSession, Local: true, @@ -496,7 +508,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes prm.cliPrm.Local = prm.local prm.cliPrm.Session = prm.tokenSession prm.cliPrm.BearerToken = prm.tokenBearer - prm.cliPrm.XHeaders = prm.xHeaders + prm.cliPrm.XHeaders = prm.calculateXHeaders() prm.cliPrm.Key = prm.key rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm) diff --git a/pkg/services/object/internal/key.go b/pkg/services/object/internal/key.go index eba716976..1e0a7ef90 100644 --- a/pkg/services/object/internal/key.go +++ b/pkg/services/object/internal/key.go @@ -3,8 +3,8 @@ package internal import ( "bytes" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) // VerifyResponseKeyV2 checks if response is signed with expected key. Returns client.ErrWrongPublicKey if not. 
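Aside on the internal client change above: SetNetmapEpoch is no longer a no-op, and every request builder now routes its X-headers through calculateXHeaders. A minimal self-contained sketch of the pattern follows; the header key constant is an assumption here (the real one is sessionAPI.XHeaderNetmapEpoch from the SDK):

package main

import (
	"fmt"
	"strconv"
)

// xHeaderNetmapEpoch stands in for sessionAPI.XHeaderNetmapEpoch; the
// concrete value is assumed for illustration only.
const xHeaderNetmapEpoch = "__SYSTEM__NETMAP_EPOCH"

type commonPrm struct {
	xHeaders    []string // flattened key/value pairs, as in the diff
	netmapEpoch uint64
}

// calculateXHeaders mirrors the helper introduced above: a zero epoch
// means "use the server's current epoch", so no header is appended.
func (x *commonPrm) calculateXHeaders() []string {
	hs := x.xHeaders
	if x.netmapEpoch != 0 {
		hs = append(hs, xHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
	}
	return hs
}

func main() {
	p := commonPrm{xHeaders: []string{"trace-id", "abc"}, netmapEpoch: 42}
	fmt.Println(p.calculateXHeaders())
	// Output: [trace-id abc __SYSTEM__NETMAP_EPOCH 42]
}

Note that the helper appends lazily at call time, so an epoch set via SetNetmapEpoch applies to whichever operation (Get, Head, PayloadRange, Put, Search) finally issues the request.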
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go index f972f43ae..6a6ee0f0f 100644 --- a/pkg/services/object/metrics.go +++ b/pkg/services/object/metrics.go @@ -4,8 +4,9 @@ import ( "context" "time" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type ( @@ -27,8 +28,14 @@ type ( start time.Time } + patchStreamMetric struct { + stream PatchObjectStream + metrics MetricRegister + start time.Time + } + MetricRegister interface { - AddRequestDuration(string, time.Duration, bool) + AddRequestDuration(string, time.Duration, bool, string) AddPayloadSize(string, int) } ) @@ -45,7 +52,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er if m.enabled { t := time.Now() defer func() { - m.metrics.AddRequestDuration("Get", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) }() err = m.next.Get(req, &getStreamMetric{ ServerStream: stream, @@ -58,11 +65,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er return } -func (m MetricCollector) Put() (PutObjectStream, error) { +func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) { if m.enabled { t := time.Now() - stream, err := m.next.Put() + stream, err := m.next.Put(ctx) if err != nil { return nil, err } @@ -73,7 +80,25 @@ func (m MetricCollector) Put() (PutObjectStream, error) { start: t, }, nil } - return m.next.Put() + return m.next.Put(ctx) +} + +func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) { + if m.enabled { + t := time.Now() + + stream, err := m.next.Patch(ctx) + if err != nil { + return nil, err + } + + return &patchStreamMetric{ + stream: stream, + metrics: m.metrics, + start: t, + }, nil + } + return m.next.Patch(ctx) } func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) { @@ -82,7 +107,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl res, err := m.next.PutSingle(ctx, request) - m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil) + m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) if err == nil { m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload())) } @@ -98,7 +123,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) res, err := m.next.Head(ctx, request) - m.metrics.AddRequestDuration("Head", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } @@ -111,7 +136,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream) err := m.next.Search(req, stream) - m.metrics.AddRequestDuration("Search", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) return err } @@ -124,7 +149,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque res, err := m.next.Delete(ctx, request) - m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } return m.next.Delete(ctx, request) 
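The string argument added to AddRequestDuration throughout these hunks is the request's IO tag, recovered via qos.IOTagFromContext. A sketch of what a conforming MetricRegister might look like (Prometheus-style and purely illustrative; the repository's actual registry lives in its metrics packages):

package metrics

import (
	"strconv"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// tagAwareRegister satisfies the updated MetricRegister interface:
// request durations are now labeled with the per-request IO tag.
type tagAwareRegister struct {
	durations *prometheus.HistogramVec // labels: method, success, io_tag
	payload   *prometheus.CounterVec   // labels: method
}

func (r *tagAwareRegister) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
	r.durations.
		WithLabelValues(method, strconv.FormatBool(success), ioTag).
		Observe(d.Seconds())
}

func (r *tagAwareRegister) AddPayloadSize(method string, size int) {
	r.payload.WithLabelValues(method).Add(float64(size))
}

For streaming calls the tag is captured from the stream's context (stream.Context()), while unary handlers pass their request context directly; both paths end up in the same duration label.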
@@ -136,7 +161,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR  err := m.next.GetRange(req, stream)  - m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil) + m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))  return err }  @@ -149,7 +174,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa  res, err := m.next.GetRangeHash(ctx, request)  - m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil) + m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))  return res, err }  @@ -185,7 +210,21 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error  func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) { res, err := s.stream.CloseAndRecv(ctx)  - s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil) + s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx)) + + return res, err +} + +func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) error { + s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().GetChunk())) + + return s.stream.Send(ctx, req) +} + +func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) { + res, err := s.stream.CloseAndRecv(ctx) + + s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))  return res, err } diff --git a/pkg/services/object/patch/range_provider.go b/pkg/services/object/patch/range_provider.go new file mode 100644 index 000000000..cb3f7c342 --- /dev/null +++ b/pkg/services/object/patch/range_provider.go @@ -0,0 +1,75 @@ +package patchsvc + +import ( + "context" + "crypto/ecdsa" + "io" + + getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" + objectUtil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + patcherSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher" +) + +func (p *pipeChunkWriter) WriteChunk(_ context.Context, chunk []byte) error { + _, err := p.wr.Write(chunk) + return err +} + +type rangeProvider struct { + getSvc *getsvc.Service + + addr oid.Address + + commonPrm *objectUtil.CommonPrm + + localNodeKey *ecdsa.PrivateKey +} + +var _ patcherSDK.RangeProvider = (*rangeProvider)(nil) + +func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.Reader { + // A remote GetRange request to a container node uses an SDK client that rejects zero-length + // ranges during validation. From the patcher's point of view, however, such a request is still valid.
+ if rng.GetLength() == 0 { + return &nopReader{} + } + + pipeReader, pipeWriter := io.Pipe() + + var rngPrm getsvc.RangePrm + rngPrm.SetSignerKey(r.localNodeKey) + rngPrm.SetCommonParameters(r.commonPrm) + + rngPrm.WithAddress(r.addr) + rngPrm.SetChunkWriter(&pipeChunkWriter{ + wr: pipeWriter, + }) + rngPrm.SetRange(rng) + + getRangeErr := make(chan error) + + go func() { + defer pipeWriter.Close() + + select { + case <-ctx.Done(): + pipeWriter.CloseWithError(ctx.Err()) + case err := <-getRangeErr: + pipeWriter.CloseWithError(err) + } + }() + + go func() { + getRangeErr <- r.getSvc.GetRange(ctx, rngPrm) + }() + + return pipeReader +} + +type nopReader struct{} + +func (nopReader) Read(_ []byte) (int, error) { + return 0, io.EOF +} diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go new file mode 100644 index 000000000..5d298bfed --- /dev/null +++ b/pkg/services/object/patch/service.go @@ -0,0 +1,41 @@ +package patchsvc + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" +) + +// Service implements Patch operation of Object service v2. +type Service struct { + *objectwriter.Config + + getSvc *getsvc.Service +} + +// NewService constructs Service instance from provided options. + +// +// Patch service can use the same objectwriter.Config initialized by Put service. +func NewService(cfg *objectwriter.Config, + getSvc *getsvc.Service, +) *Service { + return &Service{ + Config: cfg, + + getSvc: getSvc, + } +} + +// Patch calls internal service and returns v2 object streamer. +func (s *Service) Patch() (object.PatchObjectStream, error) { + nodeKey, err := s.KeyStorage.GetKey(nil) + if err != nil { + return nil, err + } + + return &Streamer{ + Config: s.Config, + getSvc: s.getSvc, + localNodeKey: nodeKey, + }, nil +} diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go new file mode 100644 index 000000000..ff13b1d3e --- /dev/null +++ b/pkg/services/object/patch/streamer.go @@ -0,0 +1,243 @@ +package patchsvc + +import ( + "context" + "crypto/ecdsa" + "errors" + "fmt" + "io" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher" +) + +// Streamer for the patch handler is a pipeline that merges the incoming stream of patches +// with the original object's payload chunks. The merged result is fed to the Put stream target. +type Streamer struct { + *objectwriter.Config + + // Patcher must be initialized on the first Streamer.Send call.
+ patcher patcher.PatchApplier + + nonFirstSend bool + + getSvc *getsvc.Service + + localNodeKey *ecdsa.PrivateKey +} + +type pipeChunkWriter struct { + wr *io.PipeWriter +} + +type headResponseWriter struct { + body *objectV2.HeadResponseBody +} + +func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *objectSDK.Object) error { + w.body.SetHeaderPart(toFullObjectHeader(hdr)) + return nil +} + +func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart { + obj := hdr.ToV2() + + hs := new(objectV2.HeaderWithSignature) + hs.SetHeader(obj.GetHeader()) + hs.SetSignature(obj.GetSignature()) + + return hs +} + +func isLinkObject(hdr *objectV2.HeaderWithSignature) bool { + split := hdr.GetHeader().GetSplit() + return len(split.GetChildren()) > 0 && split.GetParent() != nil +} + +func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool { + return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil +} + +func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { + hdrWithSig, addr, err := s.readHeader(ctx, req) + if err != nil { + return err + } + + if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular { + return errors.New("non-regular object can't be patched") + } + if isLinkObject(hdrWithSig) { + return errors.New("linking object can't be patched") + } + if isComplexObjectPart(hdrWithSig) { + return errors.New("complex object parts can't be patched") + } + + commonPrm, err := util.CommonPrmFromV2(req) + if err != nil { + return err + } + commonPrm.WithLocalOnly(false) + + rangeProvider := &rangeProvider{ + getSvc: s.getSvc, + + addr: addr, + + commonPrm: commonPrm, + + localNodeKey: s.localNodeKey, + } + + hdr := hdrWithSig.GetHeader() + oV2 := new(objectV2.Object) + hV2 := new(objectV2.Header) + oV2.SetHeader(hV2) + oV2.GetHeader().SetContainerID(hdr.GetContainerID()) + oV2.GetHeader().SetPayloadLength(hdr.GetPayloadLength()) + oV2.GetHeader().SetAttributes(hdr.GetAttributes()) + + ownerID, err := newOwnerID(req.GetVerificationHeader()) + if err != nil { + return err + } + oV2.GetHeader().SetOwnerID(ownerID) + + target, err := target.New(ctx, objectwriter.Params{ + Config: s.Config, + Common: commonPrm, + Header: objectSDK.NewFromV2(oV2), + }) + if err != nil { + return fmt.Errorf("target creation: %w", err) + } + + patcherPrm := patcher.Params{ + Header: objectSDK.NewFromV2(oV2), + + RangeProvider: rangeProvider, + + ObjectWriter: target, + } + + s.patcher = patcher.New(patcherPrm) + return nil +} + +func (s *Streamer) readHeader(ctx context.Context, req *objectV2.PatchRequest) (hdrWithSig *objectV2.HeaderWithSignature, addr oid.Address, err error) { + addrV2 := req.GetBody().GetAddress() + if addrV2 == nil { + err = errors.New("patch request has nil-address") + return + } + + if err = addr.ReadFromV2(*addrV2); err != nil { + err = fmt.Errorf("read address error: %w", err) + return + } + + commonPrm, err := util.CommonPrmFromV2(req) + if err != nil { + return + } + commonPrm.WithLocalOnly(false) + + var p getsvc.HeadPrm + p.SetSignerKey(s.localNodeKey) + p.SetCommonParameters(commonPrm) + + resp := new(objectV2.HeadResponse) + resp.SetBody(new(objectV2.HeadResponseBody)) + + p.WithAddress(addr) + p.SetHeaderWriter(&headResponseWriter{ + body: resp.GetBody(), + }) + + err = s.getSvc.Head(ctx, p) + if err != nil { + err = fmt.Errorf("get header error: %w", err) + return + } + + var ok bool + hdrPart := resp.GetBody().GetHeaderPart() + if hdrWithSig, ok = hdrPart.(*objectV2.HeaderWithSignature); !ok { + err = 
fmt.Errorf("unexpected header type: %T", hdrPart) + } + return +} + +func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { + ctx, span := tracing.StartSpanFromContext(ctx, "patch.streamer.Send") + defer span.End() + + defer func() { + s.nonFirstSend = true + }() + + if !s.nonFirstSend { + if err := s.init(ctx, req); err != nil { + return fmt.Errorf("streamer init error: %w", err) + } + } + + patch := new(objectSDK.Patch) + patch.FromV2(req.GetBody()) + + if !s.nonFirstSend { + err := s.patcher.ApplyHeaderPatch(ctx, + patcher.ApplyHeaderPatchPrm{ + NewSplitHeader: patch.NewSplitHeader, + NewAttributes: patch.NewAttributes, + ReplaceAttributes: patch.ReplaceAttributes, + }) + if err != nil { + return fmt.Errorf("patch attributes: %w", err) + } + } + + if patch.PayloadPatch != nil { + err := s.patcher.ApplyPayloadPatch(ctx, patch.PayloadPatch) + if err != nil { + return fmt.Errorf("patch payload: %w", err) + } + } else if s.nonFirstSend { + return errors.New("invalid non-first patch: empty payload") + } + + return nil +} + +func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { + if s.patcher == nil { + return nil, errors.New("uninitialized patch streamer") + } + patcherResp, err := s.patcher.Close(ctx) + if err != nil { + return nil, err + } + + oidV2 := new(refsV2.ObjectID) + + if patcherResp.AccessIdentifiers.ParentID != nil { + patcherResp.AccessIdentifiers.ParentID.WriteToV2(oidV2) + } else { + patcherResp.AccessIdentifiers.SelfID.WriteToV2(oidV2) + } + + return &objectV2.PatchResponse{ + Body: &objectV2.PatchResponseBody{ + ObjectID: oidV2, + }, + }, nil +} diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go new file mode 100644 index 000000000..b9416789c --- /dev/null +++ b/pkg/services/object/patch/util.go @@ -0,0 +1,34 @@ +package patchsvc + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "errors" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +func newOwnerID(vh *session.RequestVerificationHeader) (*refs.OwnerID, error) { + for vh.GetOrigin() != nil { + vh = vh.GetOrigin() + } + sig := vh.GetBodySignature() + if sig == nil { + return nil, errors.New("empty body signature") + } + key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("invalid signature key: %w", err) + } + + var userID user.ID + user.IDFromKey(&userID, (ecdsa.PublicKey)(*key)) + ownID := new(refs.OwnerID) + userID.WriteToV2(ownID) + + return ownID, nil +} diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index a93873738..7aeb5857d 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -1,132 +1,63 @@ package putsvc import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) -type MaxSizeSource interface { - // 
MaxObjectSize returns maximum payload size - // of physically stored object in system. - // - // Must return 0 if value can not be obtained. - MaxObjectSize() uint64 -} - type Service struct { - *cfg -} - -type Option func(*cfg) - -type ClientConstructor interface { - Get(client.NodeInfo) (client.MultiAddressClient, error) -} - -type InnerRing interface { - InnerRingKeys() ([][]byte, error) -} - -type FormatValidatorConfig interface { - VerifySessionTokenIssuer() bool -} - -type cfg struct { - keyStorage *objutil.KeyStorage - - maxSizeSrc MaxSizeSource - - localStore ObjectStorage - - cnrSrc container.Source - - netMapSrc netmap.Source - - remotePool, localPool util.WorkerPool - - netmapKeys netmap.AnnouncedKeys - - fmtValidator *object.FormatValidator - - networkState netmap.State - - clientConstructor ClientConstructor - - log *logger.Logger - - verifySessionTokenIssuer bool + *objectwriter.Config } func NewService(ks *objutil.KeyStorage, - cc ClientConstructor, - ms MaxSizeSource, - os ObjectStorage, + cc objectwriter.ClientConstructor, + ms objectwriter.MaxSizeSource, + os objectwriter.ObjectStorage, cs container.Source, ns netmap.Source, nk netmap.AnnouncedKeys, nst netmap.State, - ir InnerRing, - opts ...Option, + ir objectwriter.InnerRing, + opts ...objectwriter.Option, ) *Service { - c := &cfg{ - remotePool: util.NewPseudoWorkerPool(), - localPool: util.NewPseudoWorkerPool(), - log: &logger.Logger{Logger: zap.L()}, - keyStorage: ks, - clientConstructor: cc, - maxSizeSrc: ms, - localStore: os, - cnrSrc: cs, - netMapSrc: ns, - netmapKeys: nk, - networkState: nst, + c := &objectwriter.Config{ + Logger: logger.NewLoggerWrapper(zap.L()), + KeyStorage: ks, + ClientConstructor: cc, + MaxSizeSrc: ms, + LocalStore: os, + ContainerSource: cs, + NetmapSource: ns, + NetmapKeys: nk, + NetworkState: nst, } for i := range opts { opts[i](c) } - c.fmtValidator = object.NewFormatValidator( + c.FormatValidator = object.NewFormatValidator( object.WithLockSource(os), object.WithNetState(nst), object.WithInnerRing(ir), object.WithNetmapSource(ns), object.WithContainersSource(cs), - object.WithVerifySessionTokenIssuer(c.verifySessionTokenIssuer), - object.WithLogger(c.log), + object.WithVerifySessionTokenIssuer(c.VerifySessionTokenIssuer), + object.WithLogger(c.Logger), ) return &Service{ - cfg: c, + Config: c, } } -func (p *Service) Put() (*Streamer, error) { +func (s *Service) Put() (*Streamer, error) { return &Streamer{ - cfg: p.cfg, + Config: s.Config, }, nil } - -func WithWorkerPools(remote, local util.WorkerPool) Option { - return func(c *cfg) { - c.remotePool, c.localPool = remote, local - } -} - -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = l - } -} - -func WithVerifySessionTokenIssuer(v bool) Option { - return func(c *cfg) { - c.verifySessionTokenIssuer = v - } -} diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 6d2f3dba8..90f473254 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -9,23 +9,26 @@ import ( "hash" "sync" - objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/tzhash/tz" @@ -83,7 +86,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest } func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) { - if err := s.validarePutSingleSize(obj); err != nil { + if err := s.validarePutSingleSize(ctx, obj); err != nil { return object.ContentMeta{}, err } @@ -94,14 +97,14 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) return s.validatePutSingleObject(ctx, obj) } -func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error { +func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error { if uint64(len(obj.Payload())) != obj.PayloadSize() { - return ErrWrongPayloadSize + return target.ErrWrongPayloadSize } - maxAllowedSize := s.maxSizeSrc.MaxObjectSize() + maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx) if obj.PayloadSize() > maxAllowedSize { - return ErrExceedingMaxSize + return target.ErrExceedingMaxSize } return nil @@ -136,11 +139,11 @@ func (s *Service) validatePutSingleChecksum(obj *objectSDK.Object) error { } func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) { - if err := s.fmtValidator.Validate(ctx, obj, false); err != nil { + if err := s.FormatValidator.Validate(ctx, obj, false); err != nil { return object.ContentMeta{}, fmt.Errorf("coud not validate object format: %w", err) } - meta, err := s.fmtValidator.ValidateContent(obj) + meta, err := s.FormatValidator.ValidateContent(obj) if err != nil { return object.ContentMeta{}, fmt.Errorf("could not validate payload content: %w", err) } @@ -150,7 +153,7 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { localOnly := req.GetMetaHeader().GetTTL() <= 1 - placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly) + placement, err := 
s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly) if err != nil { return err } @@ -163,17 +166,18 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o } func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { - iter := s.cfg.newNodeIterator(placement.placementOptions) - iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, localOnly) + iter := s.NewNodeIterator(placement.placementOptions) + iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly) + iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast signer := &putSingleRequestSigner{ req: req, - keyStorage: s.keyStorage, + keyStorage: s.KeyStorage, signer: &sync.Once{}, } - return iter.forEachNode(ctx, func(ctx context.Context, nd nodeDesc) error { - return s.saveToPlacementNode(ctx, &nd, obj, signer, meta) + return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error { + return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container) }) } @@ -182,25 +186,25 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace if err != nil { return err } - key, err := s.cfg.keyStorage.GetKey(nil) + key, err := s.KeyStorage.GetKey(nil) if err != nil { return err } signer := &putSingleRequestSigner{ req: req, - keyStorage: s.keyStorage, + keyStorage: s.KeyStorage, signer: &sync.Once{}, } - w := ecWriter{ - cfg: s.cfg, - placementOpts: placement.placementOptions, - objMeta: meta, - objMetaValid: true, - commonPrm: commonPrm, - container: placement.container, - key: key, - relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error { + w := objectwriter.ECWriter{ + Config: s.Config, + PlacementOpts: placement.placementOptions, + ObjectMeta: meta, + ObjectMetaValid: true, + CommonPrm: commonPrm, + Container: placement.container, + Key: key, + Relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error { return s.redirectPutSingleRequest(ctx, signer, obj, ni, mac) }, } @@ -208,19 +212,20 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace } type putSinglePlacement struct { - placementOptions []placement.Option - isEC bool - container containerSDK.Container + placementOptions []placement.Option + isEC bool + container containerSDK.Container + resetSuccessAfterOnBroadcast bool } -func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { +func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { var result putSinglePlacement cnrID, ok := obj.ContainerID() if !ok { return result, errors.New("missing container ID") } - cnrInfo, err := s.cnrSrc.Get(cnrID) + cnrInfo, err := s.ContainerSource.Get(ctx, cnrID) if err != nil { return result, fmt.Errorf("could not get container by ID: %w", err) } @@ -229,6 +234,10 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb if len(copiesNumber) > 0 && !result.isEC { result.placementOptions = append(result.placementOptions, placement.WithCopyNumbers(copiesNumber)) } + if container.IsECContainer(cnrInfo.Value) && !object.IsECSupported(obj) && !localOnly { + result.placementOptions = 
append(result.placementOptions, placement.SuccessAfter(uint32(policy.ECParityCount(cnrInfo.Value.PlacementPolicy())+1))) + result.resetSuccessAfterOnBroadcast = true + } result.placementOptions = append(result.placementOptions, placement.ForContainer(cnrInfo.Value)) objID, ok := obj.ID() @@ -240,31 +249,31 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb } result.placementOptions = append(result.placementOptions, placement.ForObject(objID)) - latestNetmap, err := netmap.GetLatestNetworkMap(s.netMapSrc) + latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource) if err != nil { return result, fmt.Errorf("could not get latest network map: %w", err) } builder := placement.NewNetworkMapBuilder(latestNetmap) if localOnly { result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1)) - builder = svcutil.NewLocalPlacement(builder, s.netmapKeys) + builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys) } result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder)) return result, nil } -func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, obj *objectSDK.Object, - signer *putSingleRequestSigner, meta object.ContentMeta, +func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object, + signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container, ) error { - if nodeDesc.local { - return s.saveLocal(ctx, obj, meta) + if nodeDesc.Local { + return s.saveLocal(ctx, obj, meta, container) } var info client.NodeInfo - client.NodeInfoFromNetmapElement(&info, nodeDesc.info) + client.NodeInfoFromNetmapElement(&info, nodeDesc.Info) - c, err := s.clientConstructor.Get(info) + c, err := s.ClientConstructor.Get(info) if err != nil { return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } @@ -272,9 +281,10 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, o return s.redirectPutSingleRequest(ctx, signer, obj, info, c) } -func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error { - localTarget := &localTarget{ - storage: s.localStore, +func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error { + localTarget := &objectwriter.LocalTarget{ + Storage: s.LocalStore, + Container: container, } return localTarget.WriteObject(ctx, obj, meta) } @@ -307,12 +317,11 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, if err != nil { objID, _ := obj.ID() cnrID, _ := obj.ContainerID() - s.log.Warn(logs.PutSingleRedirectFailure, + s.Logger.Warn(ctx, logs.PutSingleRedirectFailure, zap.Error(err), zap.Stringer("address", addr), zap.Stringer("object_id", objID), zap.Stringer("container_id", cnrID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } @@ -341,8 +350,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, err = signature.VerifyServiceMessage(resp) if err != nil { err = fmt.Errorf("response verification failed: %w", err) + return } + st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus()) + err = apistatus.ErrFromStatus(st) + return }) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 14dae38d5..19768b7fa 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -2,32 +2,18 @@ package putsvc import ( "context" - 
"crypto/ecdsa" "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - pkgutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) type Streamer struct { - *cfg - - privateKey *ecdsa.PrivateKey + *objectwriter.Config target transformer.ChunkedObjectWriter - - relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error - - maxPayloadSz uint64 // network config } var errNotInit = errors.New("stream not initialized") @@ -35,8 +21,23 @@ var errNotInit = errors.New("stream not initialized") var errInitRecall = errors.New("init recall") func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { + if p.target != nil { + return errInitRecall + } + // initialize destination target - if err := p.initTarget(prm); err != nil { + prmTarget := objectwriter.Params{ + Config: p.Config, + Common: prm.common, + Header: prm.hdr, + Container: prm.cnr, + TraverseOpts: prm.traverseOpts, + Relay: prm.relay, + } + + var err error + p.target, err = target.New(ctx, prmTarget) + if err != nil { return fmt.Errorf("(%T) could not initialize object target: %w", p, err) } @@ -46,230 +47,6 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { return nil } -// MaxObjectSize returns maximum payload size for the streaming session. -// -// Must be called after the successful Init. 
-func (p *Streamer) MaxObjectSize() uint64 { - return p.maxPayloadSz -} - -func (p *Streamer) initTarget(prm *PutInitPrm) error { - // prevent re-calling - if p.target != nil { - return errInitRecall - } - - // prepare needed put parameters - if err := p.preparePrm(prm); err != nil { - return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err) - } - - p.maxPayloadSz = p.maxSizeSrc.MaxObjectSize() - if p.maxPayloadSz == 0 { - return fmt.Errorf("(%T) could not obtain max object size parameter", p) - } - - if prm.hdr.Signature() != nil { - return p.initUntrustedTarget(prm) - } - return p.initTrustedTarget(prm) -} - -func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error { - p.relay = prm.relay - - nodeKey, err := p.cfg.keyStorage.GetKey(nil) - if err != nil { - return err - } - p.privateKey = nodeKey - - // prepare untrusted-Put object target - p.target = &validatingPreparedTarget{ - nextTarget: newInMemoryObjectBuilder(p.newObjectWriter(prm)), - fmt: p.fmtValidator, - - maxPayloadSz: p.maxPayloadSz, - } - - return nil -} - -func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error { - sToken := prm.common.SessionToken() - - // prepare trusted-Put object target - - // get private token from local storage - var sessionInfo *util.SessionInfo - - if sToken != nil { - sessionInfo = &util.SessionInfo{ - ID: sToken.ID(), - Owner: sToken.Issuer(), - } - } - - key, err := p.keyStorage.GetKey(sessionInfo) - if err != nil { - return fmt.Errorf("(%T) could not receive session key: %w", p, err) - } - - // In case session token is missing, the line above returns the default key. - // If it isn't owner key, replication attempts will fail, thus this check. - ownerObj := prm.hdr.OwnerID() - if ownerObj.IsEmpty() { - return errors.New("missing object owner") - } - - if sToken == nil { - var ownerSession user.ID - user.IDFromKey(&ownerSession, key.PublicKey) - - if !ownerObj.Equals(ownerSession) { - return fmt.Errorf("(%T) session token is missing but object owner id is different from the default key", p) - } - } else { - if !ownerObj.Equals(sessionInfo.Owner) { - return fmt.Errorf("(%T) different token issuer and object owner identifiers %s/%s", p, sessionInfo.Owner, ownerObj) - } - } - - p.privateKey = key - p.target = &validatingTarget{ - fmt: p.fmtValidator, - nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{ - Key: key, - NextTargetInit: func() transformer.ObjectWriter { return p.newObjectWriter(prm) }, - NetworkState: p.networkState, - MaxSize: p.maxPayloadSz, - WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.cnr), - SessionToken: sToken, - }), - } - - return nil -} - -func (p *Streamer) preparePrm(prm *PutInitPrm) error { - var err error - - // get latest network map - nm, err := netmap.GetLatestNetworkMap(p.netMapSrc) - if err != nil { - return fmt.Errorf("(%T) could not get latest network map: %w", p, err) - } - - idCnr, ok := prm.hdr.ContainerID() - if !ok { - return errors.New("missing container ID") - } - - // get container to store the object - cnrInfo, err := p.cnrSrc.Get(idCnr) - if err != nil { - return fmt.Errorf("(%T) could not get container by ID: %w", p, err) - } - - prm.cnr = cnrInfo.Value - - // add common options - prm.traverseOpts = append(prm.traverseOpts, - // set processing container - placement.ForContainer(prm.cnr), - ) - - if ech := prm.hdr.ECHeader(); ech != nil { - prm.traverseOpts = append(prm.traverseOpts, - // set identifier of the processing object - placement.ForObject(ech.Parent()), - ) - } else if id, ok := 
prm.hdr.ID(); ok { - prm.traverseOpts = append(prm.traverseOpts, - // set identifier of the processing object - placement.ForObject(id), - ) - } - - // create placement builder from network map - builder := placement.NewNetworkMapBuilder(nm) - - if prm.common.LocalOnly() { - // restrict success count to 1 stored copy (to local storage) - prm.traverseOpts = append(prm.traverseOpts, placement.SuccessAfter(1)) - - // use local-only placement builder - builder = util.NewLocalPlacement(builder, p.netmapKeys) - } - - // set placement builder - prm.traverseOpts = append(prm.traverseOpts, placement.UseBuilder(builder)) - - return nil -} - -func (p *Streamer) newObjectWriter(prm *PutInitPrm) transformer.ObjectWriter { - if container.IsECContainer(prm.cnr) && object.IsECSupported(prm.hdr) { - return p.newECWriter(prm) - } - return p.newDefaultObjectWriter(prm) -} - -func (p *Streamer) newDefaultObjectWriter(prm *PutInitPrm) transformer.ObjectWriter { - var relay func(context.Context, nodeDesc) error - if p.relay != nil { - relay = func(ctx context.Context, node nodeDesc) error { - var info client.NodeInfo - - client.NodeInfoFromNetmapElement(&info, node.info) - - c, err := p.clientConstructor.Get(info) - if err != nil { - return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) - } - - return p.relay(ctx, info, c) - } - } - - return &distributedTarget{ - cfg: p.cfg, - placementOpts: prm.traverseOpts, - nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget { - if node.local { - return localTarget{ - storage: p.localStore, - } - } - - rt := &remoteTarget{ - privateKey: p.privateKey, - commonPrm: prm.common, - clientConstructor: p.clientConstructor, - } - - client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.info) - - return rt - }, - relay: relay, - } -} - -func (p *Streamer) newECWriter(prm *PutInitPrm) transformer.ObjectWriter { - return &objectWriterDispatcher{ - ecWriter: &ecWriter{ - cfg: p.cfg, - placementOpts: append(prm.traverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC - container: prm.cnr, - key: p.privateKey, - commonPrm: prm.common, - relay: p.relay, - }, - repWriter: p.newDefaultObjectWriter(prm), - } -} - func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error { if p.target == nil { return errNotInit @@ -303,10 +80,3 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) { id: ids.SelfID, }, nil } - -func (c *cfg) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) { - if c.netmapKeys.IsLocalKey(pub) { - return c.localPool, true - } - return c.remotePool, false -} diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go index db902ae59..78d4c711d 100644 --- a/pkg/services/object/put/v2/service.go +++ b/pkg/services/object/put/v2/service.go @@ -4,10 +4,10 @@ import ( "context" "fmt" - objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // Service implements Put operation of Object service v2. 
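Aside: after the removals above, what is left of the putsvc streamer initialization is small enough to read in one piece. The runnable sketch below shows the surviving control flow (guard against re-init, then delegate all target construction to a shared constructor); the types here are stubs standing in for the repo's PutInitPrm, objectwriter.Params, and target.New, so only the shape matches the diff:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Params stands in for objectwriter.Params; the real struct also carries
// Config, Common, Container, and TraverseOpts fields, per the hunks above.
type Params struct {
	Header any // object header taken from the stream's first message
	Relay  any // non-nil when the request must be relayed onward
}

type chunkedWriter interface{ Write(ctx context.Context, chunk []byte) error }

type nopWriter struct{}

func (nopWriter) Write(context.Context, []byte) error { return nil }

// newTarget stands in for target.New, which now owns trusted/untrusted
// target selection, max-size lookup, and placement preparation.
func newTarget(_ context.Context, _ Params) (chunkedWriter, error) { return nopWriter{}, nil }

var errInitRecall = errors.New("init recall")

type Streamer struct{ target chunkedWriter }

// Init mirrors the refactored flow: Init must run exactly once per stream.
func (p *Streamer) Init(ctx context.Context, prm Params) error {
	if p.target != nil {
		return errInitRecall
	}
	var err error
	p.target, err = newTarget(ctx, prm)
	if err != nil {
		return fmt.Errorf("could not initialize object target: %w", err)
	}
	return nil
}

func main() {
	var s Streamer
	fmt.Println(s.Init(context.Background(), Params{})) // <nil>
	fmt.Println(s.Init(context.Background(), Params{})) // init recall
}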
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go index 9c6de4ca8..f0c648187 100644 --- a/pkg/services/object/put/v2/streamer.go +++ b/pkg/services/object/put/v2/streamer.go @@ -4,18 +4,19 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -55,15 +56,15 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) s.saveChunks = v.GetSignature() != nil if s.saveChunks { - maxSz := s.stream.MaxObjectSize() + maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx) s.sizes = &sizes{ - payloadSz: uint64(v.GetHeader().GetPayloadLength()), + payloadSz: v.GetHeader().GetPayloadLength(), } // check payload size limit overflow if s.payloadSz > maxSz { - return putsvc.ErrExceedingMaxSize + return target.ErrExceedingMaxSize } s.init = req @@ -74,7 +75,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) // check payload size overflow if s.writtenPayload > s.payloadSz { - return putsvc.ErrWrongPayloadSize + return target.ErrWrongPayloadSize } } @@ -117,7 +118,7 @@ func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error if s.saveChunks { // check payload size correctness if s.writtenPayload != s.payloadSz { - return nil, putsvc.ErrWrongPayloadSize + return nil, target.ErrWrongPayloadSize } } diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go index a157a9542..5ec9ebe10 100644 --- a/pkg/services/object/put/v2/util.go +++ b/pkg/services/object/put/v2/util.go @@ -1,10 +1,10 @@ package putsvc import ( - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" ) diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go new file mode 100644 index 000000000..01eb1ea8d --- /dev/null +++ b/pkg/services/object/qos.go @@ -0,0 +1,145 @@ +package object + +import ( + "context" + + 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" +) + +var _ ServiceServer = (*qosObjectService)(nil) + +type AdjustIOTag interface { + AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context +} + +type qosObjectService struct { + next ServiceServer + adj AdjustIOTag +} + +func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer { + return &qosObjectService{ + next: next, + adj: adjIOTag, + } +} + +func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Delete(ctx, req) +} + +func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Get(req, &qosReadStream[*object.GetResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.GetRangeHash(ctx, req) +} + +func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Head(ctx, req) +} + +func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) { + s, err := q.next.Patch(ctx) + if err != nil { + return nil, err + } + return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{ + s: s, + adj: q.adj, + }, nil +} + +func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) { + s, err := q.next.Put(ctx) + if err != nil { + return nil, err + } + return &qosWriteStream[*object.PutRequest, *object.PutResponse]{ + s: s, + adj: q.adj, + }, nil +} + +func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.PutSingle(ctx, req) +} + +func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Search(req, &qosReadStream[*object.SearchResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +type qosSend[T any] interface { + Send(T) error +} + +type qosReadStream[T any] struct { + sender qosSend[T] + ctxF func() context.Context +} + +func (g *qosReadStream[T]) Context() context.Context { + return g.ctxF() +} + +func (g *qosReadStream[T]) Send(resp T) error { + return g.sender.Send(resp) +} + +type qosVerificationHeader interface { + GetVerificationHeader() 
*session.RequestVerificationHeader +} + +type qosSendRecv[TReq qosVerificationHeader, TResp any] interface { + Send(context.Context, TReq) error + CloseAndRecv(context.Context) (TResp, error) +} + +type qosWriteStream[TReq qosVerificationHeader, TResp any] struct { + s qosSendRecv[TReq, TResp] + adj AdjustIOTag + + ioTag string + ioTagDefined bool +} + +func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) { + if q.ioTagDefined { + ctx = tagging.ContextWithIOTag(ctx, q.ioTag) + } + return q.s.CloseAndRecv(ctx) +} + +func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error { + if !q.ioTagDefined { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx) + } + assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment") + ctx = tagging.ContextWithIOTag(ctx, q.ioTag) + return q.s.Send(ctx, req) +} diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go index 18b6107cf..bc6ffd160 100644 --- a/pkg/services/object/remote_reader.go +++ b/pkg/services/object/remote_reader.go @@ -2,7 +2,6 @@ package object import ( "context" - "errors" "fmt" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -35,8 +34,6 @@ type RemoteRequestPrm struct { const remoteOpTTL = 1 -var ErrNotFound = errors.New("object header not found") - // NewRemoteReader creates, initializes and returns new RemoteHeader instance. func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader { return &RemoteReader{ diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go deleted file mode 100644 index 95d4c9d93..000000000 --- a/pkg/services/object/request_context.go +++ /dev/null @@ -1,26 +0,0 @@ -package object - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type RequestContextKeyT struct{} - -var RequestContextKey = RequestContextKeyT{} - -// RequestContext is a context passed between middleware handlers. -type RequestContext struct { - Namespace string - - SenderKey []byte - - ContainerOwner user.ID - - Role acl.Role - - SoftAPECheck bool - - BearerToken *bearer.Token -} diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go index a10f26a34..80c971e8f 100644 --- a/pkg/services/object/response.go +++ b/pkg/services/object/response.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type ResponseService struct { @@ -37,6 +37,11 @@ type putStreamResponser struct { respSvc *response.Service } +type patchStreamResponser struct { + stream PatchObjectStream + respSvc *response.Service +} + // NewResponseService returns object service instance that passes internal service // call to response service. 
func NewResponseService(objSvc ServiceServer, respSvc *response.Service) *ResponseService { @@ -75,8 +80,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo return r, nil } -func (s *ResponseService) Put() (PutObjectStream, error) { - stream, err := s.svc.Put() +func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) { + stream, err := s.svc.Put(ctx) if err != nil { return nil, fmt.Errorf("could not create Put object streamer: %w", err) } @@ -87,6 +92,35 @@ func (s *ResponseService) Put() (PutObjectStream, error) { }, nil } +func (s *patchStreamResponser) Send(ctx context.Context, req *object.PatchRequest) error { + if err := s.stream.Send(ctx, req); err != nil { + return fmt.Errorf("could not send the request: %w", err) + } + return nil +} + +func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) { + r, err := s.stream.CloseAndRecv(ctx) + if err != nil { + return nil, fmt.Errorf("could not close stream and receive response: %w", err) + } + + s.respSvc.SetMeta(r) + return r, nil +} + +func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) { + stream, err := s.svc.Patch(ctx) + if err != nil { + return nil, fmt.Errorf("could not create Patch object streamer: %w", err) + } + + return &patchStreamResponser{ + stream: stream, + respSvc: s.respSvc, + }, nil +} + func (s *ResponseService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { resp, err := s.svc.PutSingle(ctx, req) if err != nil { diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index d70574156..60d469b11 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -8,18 +8,19 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "go.uber.org/zap" ) func (exec *execCtx) executeOnContainer(ctx context.Context) error { lookupDepth := exec.netmapLookupDepth() - exec.log.Debug(logs.TryingToExecuteInContainer, + exec.log.Debug(ctx, logs.TryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) // initialize epoch number - if err := exec.initEpoch(); err != nil { + if err := exec.initEpoch(ctx); err != nil { return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err) } @@ -43,11 +44,11 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error { } func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { - exec.log.Debug(logs.ProcessEpoch, + exec.log.Debug(ctx, logs.ProcessEpoch, zap.Uint64("number", exec.curProcEpoch), ) - traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch) + traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch) if err != nil { return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err) } @@ -58,7 +59,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { for { addrs := traverser.Next() if len(addrs) == 0 { - exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration) + exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration) break } @@ -71,8 +72,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { defer wg.Done() select { case <-ctx.Done(): -
exec.log.Debug(logs.InterruptPlacementIterationByContext, - zap.String("error", ctx.Err().Error())) + exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext, + zap.Error(ctx.Err())) return default: } @@ -81,18 +82,18 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { client.NodeInfoFromNetmapElement(&info, addrs[i]) - exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) + exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) c, err := exec.svc.clientConstructor.get(info) if err != nil { - exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err)) return } ids, err := c.searchObjects(ctx, exec, info) if err != nil { - exec.log.Debug(logs.SearchRemoteOperationFailed, - zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchRemoteOperationFailed, + zap.Error(err)) return } @@ -101,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { err = exec.writeIDList(ids) mtx.Unlock() if err != nil { - exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err)) return } }(i) @@ -112,3 +113,12 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { return nil } + +func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) { + cnrID := exec.containerID() + cnr, err := exec.svc.containerSource.Get(ctx, cnrID) + if err != nil { + return containerSDK.Container{}, err + } + return cnr.Value, nil +} diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go index 4a2c04ecd..ced51ecce 100644 --- a/pkg/services/object/search/exec.go +++ b/pkg/services/object/search/exec.go @@ -1,6 +1,8 @@ package searchsvc import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -19,13 +21,13 @@ type execCtx struct { } func (exec *execCtx) setLogger(l *logger.Logger) { - exec.log = &logger.Logger{Logger: l.With( + exec.log = l.With( zap.String("request", "SEARCH"), zap.Stringer("container", exec.containerID()), zap.Bool("local", exec.isLocal()), zap.Bool("with session", exec.prm.common.SessionToken() != nil), zap.Bool("with bearer", exec.prm.common.BearerToken() != nil), - )} + ) } func (exec *execCtx) isLocal() bool { @@ -48,13 +50,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 { return exec.prm.common.NetmapLookupDepth() } -func (exec *execCtx) initEpoch() error { +func (exec *execCtx) initEpoch(ctx context.Context) error { exec.curProcEpoch = exec.netmapEpoch() if exec.curProcEpoch > 0 { return nil } - e, err := exec.svc.currentEpochReceiver.Epoch() + e, err := exec.svc.currentEpochReceiver.Epoch(ctx) if err != nil { return err } diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go index cfaed13b8..ec65ab06a 100644 --- a/pkg/services/object/search/local.go +++ b/pkg/services/object/search/local.go @@ -11,7 +11,7 @@ import ( func (exec *execCtx) executeLocal(ctx context.Context) error { ids, err := exec.svc.localStorage.search(ctx, exec) if err != nil { - exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error())) + exec.log.Debug(ctx, 
logs.SearchLocalOperationFailed, zap.Error(err)) return err } diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go index 4a5c414d5..76c091f85 100644 --- a/pkg/services/object/search/search.go +++ b/pkg/services/object/search/search.go @@ -20,26 +20,26 @@ func (s *Service) Search(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) error { - exec.log.Debug(logs.ServingRequest) + exec.log.Debug(ctx, logs.ServingRequest) err := exec.executeLocal(ctx) - exec.logResult(err) + exec.logResult(ctx, err) if exec.isLocal() { - exec.log.Debug(logs.SearchReturnResultDirectly) + exec.log.Debug(ctx, logs.SearchReturnResultDirectly) return err } err = exec.executeOnContainer(ctx) - exec.logResult(err) + exec.logResult(ctx, err) return err } -func (exec *execCtx) logResult(err error) { +func (exec *execCtx) logResult(ctx context.Context, err error) { switch { default: - exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) case err == nil: - exec.log.Debug(logs.OperationFinishedSuccessfully) + exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) } } diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 679380402..918ad421f 100644 --- a/pkg/services/object/search/search_test.go +++ b/pkg/services/object/search/search_test.go @@ -6,10 +6,10 @@ import ( "crypto/sha256" "errors" "fmt" + "slices" "strconv" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -17,6 +17,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" @@ -58,7 +59,7 @@ type simpleIDWriter struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch() (uint64, error) { +func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { return uint64(e), nil } @@ -81,8 +82,8 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { - t, err := placement.NewTraverser( +func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { + t, err := placement.NewTraverser(context.Background(), placement.ForContainer(g.c), placement.UseBuilder(g.b[epoch]), placement.WithoutSuccessTracking(), @@ -90,7 +91,7 @@ func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch ui return t, &containerCore.Container{Value: g.c}, err } -func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var 
addr oid.Address addr.SetContainer(cnr) @@ -103,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap. return nil, errors.New("vectors for address not found") } - res := make([][]netmap.NodeInfo, len(vs)) - copy(res, vs) + res := slices.Clone(vs) return res, nil } @@ -151,7 +151,7 @@ func testSHA256() (cs [sha256.Size]byte) { func generateIDs(num int) []oid.ID { res := make([]oid.ID, num) - for i := 0; i < num; i++ { + for i := range num { res[i].SetSHA256(testSHA256()) } @@ -232,7 +232,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { ns := make([]netmap.NodeInfo, dim[i]) as := make([]string, dim[i]) - for j := 0; j < dim[i]; j++ { + for j := range dim[i] { a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", strconv.Itoa(i), strconv.Itoa(60000+j), diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index cc388c1b2..56fe56468 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -46,14 +46,16 @@ type cfg struct { } traverserGenerator interface { - GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } currentEpochReceiver interface { - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } keyStore *util.KeyStorage + + containerSource container.Source } // New creates, initializes and returns utility serving @@ -63,10 +65,11 @@ func New(e *engine.StorageEngine, tg *util.TraverserGenerator, ns netmap.Source, ks *util.KeyStorage, + cs container.Source, opts ...Option, ) *Service { c := &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), clientConstructor: &clientConstructorWrapper{ constructor: cc, }, @@ -76,6 +79,7 @@ func New(e *engine.StorageEngine, traverserGenerator: tg, currentEpochReceiver: ns, keyStore: ks, + containerSource: cs, } for i := range opts { @@ -90,6 +94,6 @@ func New(e *engine.StorageEngine, // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))} + c.log = l } } diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go index 67b6c0d01..0be5345b9 100644 --- a/pkg/services/object/search/util.go +++ b/pkg/services/object/search/util.go @@ -2,9 +2,11 @@ package searchsvc import ( "context" + "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" @@ -52,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error { } // exclude processed address - list = append(list[:i], list[i+1:]...) 
+ list = slices.Delete(list, i, i+1) i-- } @@ -112,9 +114,13 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c } func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) { + cnr, err := exec.getContainer(ctx) + if err != nil { + return nil, err + } var selectPrm engine.SelectPrm selectPrm.WithFilters(exec.searchFilters()) - selectPrm.WithContainerID(exec.containerID()) + selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr)) r, err := e.storage.Select(ctx, selectPrm) if err != nil { diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go index 5a2e9b936..7bb6e4d3c 100644 --- a/pkg/services/object/search/v2/request_forwarder.go +++ b/pkg/services/object/search/v2/request_forwarder.go @@ -8,14 +8,14 @@ import ( "io" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/services/object/search/v2/service.go b/pkg/services/object/search/v2/service.go index 78b72ac79..856cd9f04 100644 --- a/pkg/services/object/search/v2/service.go +++ b/pkg/services/object/search/v2/service.go @@ -1,10 +1,10 @@ package searchsvc import ( - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // Service implements Search operation of Object service v2. 
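Editor's note on the recurring append(s[:i], s[i+1:]...) to slices.Delete(s, i, i+1) rewrites (in uniqueIDWriter.WriteIDs above, and the same pattern in the placement code below): a minimal, self-contained sketch of the delete-while-iterating idiom, with made-up data; it is illustrative only and not part of the patch.

package main

import (
	"fmt"
	"slices"
)

func main() {
	list := []int{1, 2, 2, 3, 2}
	for i := 0; i < len(list); i++ {
		if list[i] == 2 { // drop an already-processed entry, as WriteIDs does
			list = slices.Delete(list, i, i+1) // removes list[i], shifting the tail left
			i--                                // re-check the element that moved into index i
		}
	}
	fmt.Println(list) // [1 3]
}

Both forms are equivalent here; slices.Delete states the intent directly and, since Go 1.22, also zeroes the vacated tail elements so dropped pointers can be collected.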
diff --git a/pkg/services/object/search/v2/streamer.go b/pkg/services/object/search/v2/streamer.go index 15e2d53d5..93b281343 100644 --- a/pkg/services/object/search/v2/streamer.go +++ b/pkg/services/object/search/v2/streamer.go @@ -1,9 +1,9 @@ package searchsvc import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go index e971fa8e5..48ae98958 100644 --- a/pkg/services/object/search/v2/util.go +++ b/pkg/services/object/search/v2/util.go @@ -5,12 +5,12 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index 73b88f233..e65293977 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -3,8 +3,8 @@ package object import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // GetObjectStream is an interface of FrostFS API v2 compatible object streamer. @@ -31,11 +31,18 @@ type PutObjectStream interface { CloseAndRecv(context.Context) (*object.PutResponse, error) } +// PatchObjectStream is an interface of FrostFS API v2 compatible patch streamer. +type PatchObjectStream interface { + Send(context.Context, *object.PatchRequest) error + CloseAndRecv(context.Context) (*object.PatchResponse, error) +} + // ServiceServer is an interface of utility // serving v2 Object service. 
type ServiceServer interface { Get(*object.GetRequest, GetObjectStream) error - Put() (PutObjectStream, error) + Put(context.Context) (PutObjectStream, error) + Patch(context.Context) (PatchObjectStream, error) Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error) Search(*object.SearchRequest, SearchStream) error Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error) diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index 4bf581b78..fd8e926dd 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -5,13 +5,11 @@ import ( "crypto/ecdsa" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type SignService struct { - key *ecdsa.PrivateKey - sigSvc *util.SignService svc ServiceServer @@ -35,6 +33,12 @@ type putStreamSigner struct { err error } +type patchStreamSigner struct { + sigSvc *util.SignService + stream PatchObjectStream + err error +} + type getRangeStreamSigner struct { GetObjectRangeStream sigSvc *util.SignService @@ -42,7 +46,6 @@ type getRangeStreamSigner struct { func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService { return &SignService{ - key: key, sigSvc: util.NewUnarySignService(key), svc: svc, } @@ -93,15 +96,16 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - return nil, fmt.Errorf("could not close stream and receive response: %w", err) + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PutResponse) } } return resp, s.sigSvc.SignResponse(resp, err) } -func (s *SignService) Put() (PutObjectStream, error) { - stream, err := s.svc.Put() +func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) { + stream, err := s.svc.Put(ctx) if err != nil { return nil, fmt.Errorf("could not create Put object streamer: %w", err) } @@ -112,6 +116,43 @@ func (s *SignService) Put() (PutObjectStream, error) { }, nil } +func (s *patchStreamSigner) Send(ctx context.Context, req *object.PatchRequest) error { + if s.err = s.sigSvc.VerifyRequest(req); s.err != nil { + return util.ErrAbortStream + } + if s.err = s.stream.Send(ctx, req); s.err != nil { + return util.ErrAbortStream + } + return nil +} + +func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PatchResponse, err error) { + if s.err != nil { + err = s.err + resp = new(object.PatchResponse) + } else { + resp, err = s.stream.CloseAndRecv(ctx) + if err != nil { + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PatchResponse) + } + } + + return resp, s.sigSvc.SignResponse(resp, err) +} + +func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) { + stream, err := s.svc.Patch(ctx) + if err != nil { + return nil, fmt.Errorf("could not create Patch object streamer: %w", err) + } + + return &patchStreamSigner{ + stream: stream, + sigSvc: s.sigSvc, + }, nil +} + func (s *SignService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { if err := s.sigSvc.VerifyRequest(req); err != nil { resp := new(object.HeadResponse) diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go index 54e49cb12..b446d3605 100644 --- a/pkg/services/object/transport_splitter.go +++
b/pkg/services/object/transport_splitter.go @@ -4,8 +4,8 @@ import ( "bytes" "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type ( @@ -87,8 +87,12 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream) }) } -func (c TransportSplitter) Put() (PutObjectStream, error) { - return c.next.Put() +func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) { + return c.next.Put(ctx) +} + +func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) { + return c.next.Patch(ctx) } func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) { @@ -158,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error { var newResp *object.SearchResponse - for ln := uint64(len(ids)); ; { + for { if newResp == nil { newResp = new(object.SearchResponse) newResp.SetBody(body) } - cut := min(s.addrAmount, ln) + cut := min(s.addrAmount, uint64(len(ids))) body.SetIDList(ids[:cut]) newResp.SetMetaHeader(resp.GetMetaHeader()) diff --git a/pkg/services/object/util/key_test.go b/pkg/services/object/util/key_test.go index cb7ddfde5..1753a26f7 100644 --- a/pkg/services/object/util/key_test.go +++ b/pkg/services/object/util/key_test.go @@ -5,10 +5,10 @@ import ( "crypto/elliptic" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" tokenStorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index 92beedaa7..b10826226 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -1,6 +1,8 @@ package util import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -8,18 +10,10 @@ import ( ) // LogServiceError writes error message of object service to provided logger. -func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) { - l.Error(logs.UtilObjectServiceError, +func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) { + l.Error(ctx, logs.UtilObjectServiceError, zap.String("node", network.StringifyGroup(node)), zap.String("request", req), - zap.String("error", err.Error()), - ) -} - -// LogWorkerPoolError writes debug error message of object worker pool to provided logger. 
-func LogWorkerPoolError(l *logger.Logger, req string, err error) { - l.Error(logs.UtilCouldNotPushTaskToWorkerPool, - zap.String("request", req), - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go index 1bd39f9ea..f74b0aab9 100644 --- a/pkg/services/object/util/placement.go +++ b/pkg/services/object/util/placement.go @@ -1,7 +1,9 @@ package util import ( + "context" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -43,8 +45,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu } } -func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(cnr, obj, policy) +func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -76,8 +78,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac } } -func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(cnr, obj, policy) +func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -92,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapS } if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) { - vs[i] = append(vs[i][:j], vs[i][j+1:]...) + vs[i] = slices.Delete(vs[i], j, j+1) j-- } } @@ -122,15 +124,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav // GenerateTraverser generates placement Traverser for provided object address // using epoch-th network map. -func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { +func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { // get network map by epoch - nm, err := g.netMapSrc.GetNetMapByEpoch(epoch) + nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch) if err != nil { return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err) } // get container related container - cnr, err := g.cnrSrc.Get(idCnr) + cnr, err := g.cnrSrc.Get(ctx, idCnr) if err != nil { return nil, nil, fmt.Errorf("could not get container: %w", err) } @@ -160,7 +162,7 @@ func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoc ) } - t, err := placement.NewTraverser(traverseOpts...) + t, err := placement.NewTraverser(ctx, traverseOpts...) 
if err != nil { return nil, nil, err } diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go index 022b9fe5b..34d8ec704 100644 --- a/pkg/services/object/util/prm.go +++ b/pkg/services/object/util/prm.go @@ -4,7 +4,7 @@ import ( "fmt" "strconv" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" ) @@ -100,11 +100,18 @@ func (p *CommonPrm) SetNetmapLookupDepth(v uint64) { // ForgetTokens forgets all the tokens read from the request's // meta information before. -func (p *CommonPrm) ForgetTokens() { +func (p *CommonPrm) ForgetTokens() func() { if p != nil { + tk := p.token + br := p.bearer p.token = nil p.bearer = nil + return func() { + p.token = tk + p.bearer = br + } } + return func() {} } func CommonPrmFromV2(req interface { diff --git a/pkg/services/object_manager/placement/cache.go b/pkg/services/object_manager/placement/cache.go index 217261877..2a8460ca5 100644 --- a/pkg/services/object_manager/placement/cache.go +++ b/pkg/services/object_manager/placement/cache.go @@ -3,6 +3,7 @@ package placement import ( "crypto/sha256" "fmt" + "slices" "sync" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -44,7 +45,7 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p raw, ok := c.containerCache.Get(cnr) c.mtx.Unlock() if ok { - return raw, nil + return c.cloneResult(raw), nil } } else { c.lastEpoch = nm.Epoch() @@ -65,5 +66,13 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p c.containerCache.Add(cnr, cn) } c.mtx.Unlock() - return cn, nil + return c.cloneResult(cn), nil +} + +func (c *ContainerNodesCache) cloneResult(nodes [][]netmapSDK.NodeInfo) [][]netmapSDK.NodeInfo { + result := make([][]netmapSDK.NodeInfo, len(nodes)) + for repIdx := range nodes { + result[repIdx] = slices.Clone(nodes[repIdx]) + } + return result } diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go index 07e9340dc..7242970b5 100644 --- a/pkg/services/object_manager/placement/cache_test.go +++ b/pkg/services/object_manager/placement/cache_test.go @@ -64,7 +64,7 @@ func TestContainerNodesCache(t *testing.T) { nm2 := nm(1, nodes[1:2]) cnr := [size * 2]cid.ID{} res := [size * 2][][]netmapSDK.NodeInfo{} - for i := 0; i < size*2; i++ { + for i := range size * 2 { cnr[i] = cidtest.ID() var err error @@ -77,7 +77,7 @@ func TestContainerNodesCache(t *testing.T) { require.NoError(t, err) require.Equal(t, res[i], r) } - for i := 0; i < size; i++ { + for i := range size { r, err := c.ContainerNodes(nm2, cnr[i], pp) require.NoError(t, err) require.NotEqual(t, res[i], r) @@ -85,7 +85,10 @@ func TestContainerNodesCache(t *testing.T) { }) t.Run("the error is propagated", func(t *testing.T) { var pp netmapSDK.PlacementPolicy - require.NoError(t, pp.DecodeString("REP 1 SELECT 1 FROM X FILTER ATTR EQ 42 AS X")) + r := netmapSDK.ReplicaDescriptor{} + r.SetNumberOfObjects(1) + r.SetSelectorName("Missing") + pp.AddReplicas(r) c := placement.NewContainerNodesCache(size) _, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp) diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go new file mode 100644 index 000000000..0f24a9d96 --- /dev/null +++ b/pkg/services/object_manager/placement/metrics.go @@ -0,0 +1,185 @@ +package 
placement + +import ( + "errors" + "fmt" + "maps" + "math" + "strings" + "sync" + "sync/atomic" + + locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" + locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +const ( + attrPrefix = "$attribute:" + + geoDistance = "$geoDistance" +) + +type Metric interface { + CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int +} + +type metricsParser struct { + locodeDBPath string + locodes map[string]locodedb.Point +} + +type MetricParser interface { + ParseMetrics([]string) ([]Metric, error) +} + +func NewMetricsParser(locodeDBPath string) (MetricParser, error) { + return &metricsParser{ + locodeDBPath: locodeDBPath, + }, nil +} + +func (p *metricsParser) initLocodes() error { + if len(p.locodes) != 0 { + return nil + } + if len(p.locodeDBPath) > 0 { + p.locodes = make(map[string]locodedb.Point) + locodeDB := locodebolt.New(locodebolt.Prm{ + Path: p.locodeDBPath, + }, + locodebolt.ReadOnly(), + ) + err := locodeDB.Open() + if err != nil { + return err + } + defer locodeDB.Close() + err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) { + p.locodes[k] = v + }) + if err != nil { + return err + } + return nil + } + return errors.New("set path to locode database") +} + +func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) { + var metrics []Metric + for _, raw := range priority { + if attr, found := strings.CutPrefix(raw, attrPrefix); found { + metrics = append(metrics, NewAttributeMetric(attr)) + } else if raw == geoDistance { + err := p.initLocodes() + if err != nil { + return nil, err + } + if len(p.locodes) == 0 { + return nil, fmt.Errorf("provide locodes database for metric %s", raw) + } + m := NewGeoDistanceMetric(p.locodes) + metrics = append(metrics, m) + } else { + return nil, fmt.Errorf("unsupported priority metric %s", raw) + } + } + return metrics, nil +} + +// attributeMetric describes priority metric based on attribute. +type attributeMetric struct { + attribute string +} + +// CalculateValue returns 0 if both from and to contain the attribute attributeMetric.attribute +// with the same value. Otherwise it returns 1. +func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { + fromAttr := from.Attribute(am.attribute) + toAttr := to.Attribute(am.attribute) + if len(fromAttr) > 0 && len(toAttr) > 0 && fromAttr == toAttr { + return 0 + } + return 1 +} + +func NewAttributeMetric(attr string) Metric { + return &attributeMetric{attribute: attr} +} + +// geoDistanceMetric describes priority metric based on geographic distance between nodes. +type geoDistanceMetric struct { + locodes map[string]locodedb.Point + distance *atomic.Pointer[map[string]int] + mtx sync.Mutex +} + +func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric { + d := atomic.Pointer[map[string]int]{} + m := make(map[string]int) + d.Store(&m) + gm := &geoDistanceMetric{ + locodes: locodes, + distance: &d, + } + return gm +} + +// CalculateValue returns the distance in kilometers between the current node and the provided one +// if coordinates are found for both nodes. Otherwise it returns math.MaxInt.
+func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { + fl := from.LOCODE() + tl := to.LOCODE() + if fl == tl { + return 0 + } + m := gm.distance.Load() + if v, ok := (*m)[fl+tl]; ok { + return v + } + return gm.calculateDistance(fl, tl) +} + +func (gm *geoDistanceMetric) calculateDistance(from, to string) int { + gm.mtx.Lock() + defer gm.mtx.Unlock() + od := gm.distance.Load() + if v, ok := (*od)[from+to]; ok { + return v + } + nd := maps.Clone(*od) + var dist int + pointFrom, okFrom := gm.locodes[from] + pointTo, okTo := gm.locodes[to] + if okFrom && okTo { + dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude())) + } else { + dist = math.MaxInt + } + nd[from+to] = dist + gm.distance.Store(&nd) + + return dist +} + +// distance returns the distance in kilometers between two points. +// Parameters are latitude and longitude of point 1 and 2 in decimal degrees. +// Original implementation can be found here https://www.geodatasource.com/developers/go. +func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 { + radLat1 := math.Pi * lt1 / 180 + radLat2 := math.Pi * lt2 / 180 + radTheta := math.Pi * (ln1 - ln2) / 180 + + dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta) + + if dist > 1 { + dist = 1 + } + + dist = math.Acos(dist) + dist = dist * 180 / math.Pi + dist = dist * 60 * 1.1515 * 1.609344 + + return dist +} diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go index 1782e27ea..b3f8d9c03 100644 --- a/pkg/services/object_manager/placement/netmap.go +++ b/pkg/services/object_manager/placement/netmap.go @@ -1,6 +1,7 @@ package placement import ( + "context" "crypto/sha256" "fmt" @@ -35,12 +36,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder { } } -func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) { +func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) { return s.nm, nil } -func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - nm, err := netmap.GetLatestNetworkMap(b.nmSrc) +func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc) if err != nil { return nil, fmt.Errorf("could not get network map: %w", err) } diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 306169571..a3f9af959 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -1,8 +1,10 @@ package placement import ( + "context" "errors" "fmt" + "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" @@ -20,7 +22,12 @@ type Builder interface { // // Must return all container nodes if object identifier // is nil. - BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) + BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) +} + +type NodeState interface { + // LocalNodeInfo returns the current node state in FrostFS API v2 NodeInfo structure. + LocalNodeInfo() *netmap.NodeInfo +} // Option represents placement traverser option.
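For orientation, a quick standalone check of distance() above: the spherical law of cosines gives the central angle, and the 60 * 1.1515 * 1.609344 chain converts arc degrees to minutes (roughly nautical miles), then statute miles, then kilometers. The coordinates below are rough illustrative values for UN/LOCODEs used in the tests, not data from this patch.

package main

import (
	"fmt"
	"math"
)

// Same formula as distance() in metrics.go, reproduced here for a sanity check.
func distance(lt1, ln1, lt2, ln2 float64) float64 {
	radLat1 := math.Pi * lt1 / 180
	radLat2 := math.Pi * lt2 / 180
	radTheta := math.Pi * (ln1 - ln2) / 180

	dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
	if dist > 1 {
		dist = 1 // clamp rounding noise before Acos
	}
	dist = math.Acos(dist) * 180 / math.Pi
	return dist * 60 * 1.1515 * 1.609344
}

func main() {
	// Approximate coordinates: RU LED (Saint Petersburg) and FI HEL (Helsinki).
	// The result should be on the order of 300 km.
	fmt.Printf("%.0f km\n", distance(59.95, 30.32, 60.17, 24.94))
}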
@@ -50,6 +57,10 @@ type cfg struct { policy netmap.PlacementPolicy builder Builder + + metrics []Metric + + nodeState NodeState } const invalidOptsMsg = "invalid traverser options" @@ -68,7 +79,7 @@ func defaultCfg() *cfg { } // NewTraverser creates, initializes with options and returns Traverser instance. -func NewTraverser(opts ...Option) (*Traverser, error) { +func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) { cfg := defaultCfg() for i := range opts { @@ -88,7 +99,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) { return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy) } - ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy) + ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy) if err != nil { return nil, fmt.Errorf("could not build placement: %w", err) } @@ -99,7 +110,20 @@ func NewTraverser(opts ...Option) (*Traverser, error) { } var rem []int - if cfg.flatSuccess != nil { + if len(cfg.metrics) > 0 && cfg.nodeState != nil { + rem = defaultCopiesVector(cfg.policy) + var unsortedVector []netmap.NodeInfo + var regularVector []netmap.NodeInfo + for i := range rem { + pivot := min(len(ns[i]), rem[i]) + unsortedVector = append(unsortedVector, ns[i][:pivot]...) + regularVector = append(regularVector, ns[i][pivot:]...) + } + rem = []int{-1, -1} + + sortedVector := sortVector(cfg, unsortedVector) + ns = [][]netmap.NodeInfo{sortedVector, regularVector} + } else if cfg.flatSuccess != nil { ns = flatNodes(ns) rem = []int{int(*cfg.flatSuccess)} } else { @@ -136,7 +160,7 @@ func defaultCopiesVector(policy netmap.PlacementPolicy) []int { replNum := policy.NumberOfReplicas() copyVector := make([]int, 0, replNum) - for i := 0; i < replNum; i++ { + for i := range replNum { copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount())) } @@ -157,6 +181,35 @@ func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo { return [][]netmap.NodeInfo{flat} } +type nodeMetrics struct { + index int + metrics []int +} + +func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo { + nm := make([]nodeMetrics, len(unsortedVector)) + node := cfg.nodeState.LocalNodeInfo() + + for i := range unsortedVector { + m := make([]int, len(cfg.metrics)) + for j, pm := range cfg.metrics { + m[j] = pm.CalculateValue(node, &unsortedVector[i]) + } + nm[i] = nodeMetrics{ + index: i, + metrics: m, + } + } + slices.SortStableFunc(nm, func(a, b nodeMetrics) int { + return slices.Compare(a.metrics, b.metrics) + }) + sortedVector := make([]netmap.NodeInfo, len(unsortedVector)) + for i := range unsortedVector { + sortedVector[i] = unsortedVector[nm[i].index] + } + return sortedVector +} + // Node is a descriptor of storage node with information required for intra-container communication. type Node struct { addresses network.AddressGroup @@ -212,7 +265,7 @@ func (t *Traverser) Next() []Node { nodes := make([]Node, count) - for i := 0; i < count; i++ { + for i := range count { err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i])) if err != nil { return nil @@ -235,8 +288,8 @@ func (t *Traverser) Next() []Node { func (t *Traverser) skipEmptyVectors() { for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 { - t.vectors = append(t.vectors[:i], t.vectors[i+1:]...) - t.rem = append(t.rem[:i], t.rem[i+1:]...) 
+ t.vectors = slices.Delete(t.vectors, i, i+1) + t.rem = slices.Delete(t.rem, i, i+1) i-- } else { break @@ -303,6 +356,13 @@ func SuccessAfter(v uint32) Option { } } +// ResetSuccessAfter resets the flat success number setting option. +func ResetSuccessAfter() Option { + return func(c *cfg) { + c.flatSuccess = nil + } +} + // WithoutSuccessTracking disables success tracking in traversal. func WithoutSuccessTracking() Option { return func(c *cfg) { @@ -315,3 +375,17 @@ func WithCopyNumbers(v []uint32) Option { c.copyNumbers = v } } + +// WithPriorityMetrics uses the provided priority metrics to sort nodes. +func WithPriorityMetrics(m []Metric) Option { + return func(c *cfg) { + c.metrics = m + } +} + +// WithNodeState provides the state of the current node. +func WithNodeState(s NodeState) Option { + return func(c *cfg) { + c.nodeState = s + } +} diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index f5731c81e..d1370f21e 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -1,6 +1,8 @@ package placement import ( + "context" + "slices" "strconv" "testing" @@ -17,12 +19,14 @@ type testBuilder struct { vectors [][]netmap.NodeInfo } -func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { return b.vectors, nil } func testNode(v uint32) (n netmap.NodeInfo) { - n.SetNetworkEndpoints("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v))) + ip := "/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)) + n.SetNetworkEndpoints(ip) + n.SetPublicKey([]byte(ip)) return n } @@ -31,8 +35,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo { vc := make([][]netmap.NodeInfo, 0, len(v)) for i := range v { - ns := make([]netmap.NodeInfo, len(v[i])) - copy(ns, v[i]) + ns := slices.Clone(v[i]) vc = append(vc, ns) } @@ -40,7 +43,15 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo { return vc } -func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) { +func testPlacement(ss []int, rs []int) ([][]netmap.NodeInfo, container.Container) { + return placement(ss, rs, nil) +} + +func testECPlacement(ss []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) { + return placement(ss, nil, ec) +} + +func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) { nodes := make([][]netmap.NodeInfo, 0, len(rs)) replicas := make([]netmap.ReplicaDescriptor, 0, len(rs)) num := uint32(0) @@ -48,7 +59,7 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) { for i := range ss { ns := make([]netmap.NodeInfo, 0, ss[i]) - for j := 0; j < ss[i]; j++ { + for range ss[i] { ns = append(ns, testNode(num)) num++ } @@ -56,7 +67,12 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) { nodes = append(nodes, ns) var rd netmap.ReplicaDescriptor - rd.SetNumberOfObjects(uint32(rs[i])) + if len(rs) > 0 { + rd.SetNumberOfObjects(uint32(rs[i])) + } else { + rd.SetECDataCount(uint32(ec[i][0])) + rd.SetECParityCount(uint32(ec[i][1])) + } replicas = append(replicas, rd) } @@ -87,7 +103,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}),
WithoutSuccessTracking(), @@ -116,7 +132,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -125,7 +141,7 @@ func TestTraverserObjectScenarios(t *testing.T) { ) require.NoError(t, err) - for i := 0; i < len(nodes[0]); i++ { + for range len(nodes[0]) { require.NotNil(t, tr.Next()) } @@ -134,7 +150,7 @@ func TestTraverserObjectScenarios(t *testing.T) { err = n.FromIterator(netmapcore.Node(nodes[1][0])) require.NoError(t, err) - require.Equal(t, []Node{{addresses: n}}, tr.Next()) + require.Equal(t, []Node{{addresses: n, key: []byte("/ip4/0.0.0.0/tcp/5")}}, tr.Next()) }) t.Run("put scenario", func(t *testing.T) { @@ -145,7 +161,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), ) @@ -164,7 +180,7 @@ func TestTraverserObjectScenarios(t *testing.T) { require.Empty(t, tr.Next()) require.False(t, tr.Success()) - for i := 0; i < replicas[curVector]; i++ { + for range replicas[curVector] { tr.SubmitSuccess() } } @@ -186,7 +202,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodes, cnr := testPlacement(selectors, replicas) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local) @@ -261,7 +277,7 @@ func TestTraverserRemValues(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), WithCopyNumbers(testCase.copyNumbers), @@ -275,3 +291,363 @@ func TestTraverserRemValues(t *testing.T) { }) } } + +type nodeState struct { + node *netmap.NodeInfo +} + +func (n *nodeState) LocalNodeInfo() *netmap.NodeInfo { + return n.node +} + +func TestTraverserPriorityMetrics(t *testing.T) { + t.Run("one rep one metric", func(t *testing.T) { + selectors := []int{4} + replicas := []int{3} + + nodes, cnr := testPlacement(selectors, replicas) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("ClusterName", "A") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("ClusterName", "A") + // Node_2, PK - ip4/0.0.0.0/tcp/2 + nodes[0][2].SetAttribute("ClusterName", "B") + // Node_3, PK - ip4/0.0.0.0/tcp/3 + nodes[0][3].SetAttribute("ClusterName", "B") + + sdkNode := testNode(5) + sdkNode.SetAttribute("ClusterName", "B") + + nodesCopy := copyVectors(nodes) + + m := []Metric{NewAttributeMetric("ClusterName")} + + tr, err := NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `ClusterName` the order will be: + // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}] + // With priority metric `ClusterName` and current node in cluster B + // the order should be: + // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 3, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + 
require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey())) + + next = tr.Next() + // The last node is + require.Equal(t, 1, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) + + t.Run("one rep one metric fewer nodes", func(t *testing.T) { + selectors := []int{2} + replicas := []int{3} + + nodes, cnr := testPlacement(selectors, replicas) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("ClusterName", "A") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("ClusterName", "B") + + sdkNode := testNode(5) + sdkNode.SetAttribute("ClusterName", "B") + + nodesCopy := copyVectors(nodes) + + m := []Metric{NewAttributeMetric("ClusterName")} + + tr, err := NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `ClusterName` the order will be: + // [ {Node_0 A}, {Node_1 A} ] + // With priority metric `ClusterName` and current node in cluster B + // the order should be: + // [ {Node_1 B}, {Node_0 A} ] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) + + t.Run("two reps two metrics", func(t *testing.T) { + selectors := []int{3, 3} + replicas := []int{2, 2} + + nodes, cnr := testPlacement(selectors, replicas) + + // REPLICA #1 + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("ClusterName", "A") + nodes[0][0].SetAttribute("UN-LOCODE", "RU LED") + + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("ClusterName", "A") + nodes[0][1].SetAttribute("UN-LOCODE", "FI HEL") + + // Node_2, PK - ip4/0.0.0.0/tcp/2 + nodes[0][2].SetAttribute("ClusterName", "A") + nodes[0][2].SetAttribute("UN-LOCODE", "RU LED") + + // REPLICA #2 + // Node_3 ip4/0.0.0.0/tcp/3 + nodes[1][0].SetAttribute("ClusterName", "B") + nodes[1][0].SetAttribute("UN-LOCODE", "RU MOW") + + // Node_4, PK - ip4/0.0.0.0/tcp/4 + nodes[1][1].SetAttribute("ClusterName", "B") + nodes[1][1].SetAttribute("UN-LOCODE", "RU DME") + + // Node_5, PK - ip4/0.0.0.0/tcp/5 + nodes[1][2].SetAttribute("ClusterName", "B") + nodes[1][2].SetAttribute("UN-LOCODE", "RU MOW") + + sdkNode := testNode(9) + sdkNode.SetAttribute("ClusterName", "B") + sdkNode.SetAttribute("UN-LOCODE", "RU DME") + + nodesCopy := copyVectors(nodes) + + m := []Metric{ + NewAttributeMetric("ClusterName"), + NewAttributeMetric("UN-LOCODE"), + } + + tr, err := NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Check that nodes in the same cluster and + // in the same location should be the first in slice. + // Nodes which are follow criteria but stay outside the replica + // should be in the next slice. 
+ + next := tr.Next() + require.Equal(t, 4, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey())) + + next = tr.Next() + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + + sdkNode.SetAttribute("ClusterName", "B") + sdkNode.SetAttribute("UN-LOCODE", "RU MOW") + + nodesCopy = copyVectors(nodes) + + tr, err = NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + next = tr.Next() + require.Equal(t, 4, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey())) + + next = tr.Next() + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + + sdkNode.SetAttribute("ClusterName", "A") + sdkNode.SetAttribute("UN-LOCODE", "RU LED") + + nodesCopy = copyVectors(nodes) + + tr, err = NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + next = tr.Next() + require.Equal(t, 4, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[2].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[3].PublicKey())) + + next = tr.Next() + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) + + t.Run("ec container", func(t *testing.T) { + selectors := []int{4} + ec := [][]int{{2, 1}} + + nodes, cnr := testECPlacement(selectors, ec) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("ClusterName", "A") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("ClusterName", "A") + // Node_2, PK - ip4/0.0.0.0/tcp/2 + nodes[0][2].SetAttribute("ClusterName", "B") + // Node_3, PK - ip4/0.0.0.0/tcp/3 + nodes[0][3].SetAttribute("ClusterName", "B") + + sdkNode := testNode(5) + sdkNode.SetAttribute("ClusterName", "B") + + nodesCopy := copyVectors(nodes) + + m := []Metric{NewAttributeMetric("ClusterName")} + + tr, err := NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `ClusterName` the order will be: + // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}] + // With priority metric `ClusterName` 
and current node in cluster B + // the order should be: + // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 3, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey())) + + next = tr.Next() + // The last node is Node_3, the remaining cluster B node + require.Equal(t, 1, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) + + t.Run("one rep one geo metric", func(t *testing.T) { + t.Skip("requires a real UN/LOCODE database") + selectors := []int{2} + replicas := []int{2} + + nodes, cnr := testPlacement(selectors, replicas) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("UN-LOCODE", "RU LED") + + sdkNode := testNode(2) + sdkNode.SetAttribute("UN-LOCODE", "FI HEL") + + nodesCopy := copyVectors(nodes) + + parser, err := NewMetricsParser("/path/to/locode_db") + require.NoError(t, err) + m, err := parser.ParseMetrics([]string{geoDistance}) + require.NoError(t, err) + + tr, err := NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `$geoDistance` the order will be: + // [ {Node_0 RU MOW}, {Node_1 RU LED}] + // With priority metric `$geoDistance` the order should be: + // [ {Node_1 RU LED}, {Node_0 RU MOW}] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) +} diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index c3c810001..e5f001d5a 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -4,9 +4,9 @@ import ( "context" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" @@ -57,14 +57,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr ts, err := g.tsSource.Tombstone(ctx, a, epoch) if err != nil { - log.Warn( + log.Warn(ctx, logs.TombstoneCouldNotGetTheTombstoneTheSource, zap.Error(err), ) - } else { - if ts != nil { - return g.handleTS(addrStr, ts, epoch) - } + } else if ts != nil { + return g.handleTS(ctx, addrStr, ts, epoch) } // requested tombstone not @@ -72,12 +70,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr return false } -func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool { +func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool { for _, atr := range ts.Attributes() { - if atr.Key() == objectV2.SysAttributeExpEpoch || atr.Key() == 
objectV2.SysAttributeExpEpochNeoFS { + if atr.Key() == objectV2.SysAttributeExpEpoch { epoch, err := strconv.ParseUint(atr.Value(), 10, 64) if err != nil { - g.log.Warn( + g.log.Warn(ctx, logs.TombstoneExpirationParseFailure, zap.Error(err), ) diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go index 9d33e8179..2147a32fe 100644 --- a/pkg/services/object_manager/tombstone/constructor.go +++ b/pkg/services/object_manager/tombstone/constructor.go @@ -3,6 +3,7 @@ package tombstone import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/zap" @@ -23,7 +24,7 @@ type Option func(*cfg) func defaultCfg() *cfg { return &cfg{ - log: &logger.Logger{Logger: zap.NewNop()}, + log: logger.NewLoggerWrapper(zap.NewNop()), cacheSize: defaultLRUCacheSize, } } @@ -49,9 +50,7 @@ func NewChecker(oo ...Option) *ExpirationChecker { panicOnNil(cfg.tsSource, "Tombstone source") cache, err := lru.New[string, uint64](cfg.cacheSize) - if err != nil { - panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err)) - } + assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize)) return &ExpirationChecker{ cache: cache, diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go index 1ff07b05a..975941847 100644 --- a/pkg/services/object_manager/tombstone/source/source.go +++ b/pkg/services/object_manager/tombstone/source/source.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -38,9 +39,7 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) { // Panics if any of the provided options does not allow // constructing a valid tombstone local Source. 
func NewSource(p TombstoneSourcePrm) Source { - if p.s == nil { - panic("Tombstone source: nil object service") - } + assert.False(p.s == nil, "Tombstone source: nil object service") return Source(p) } diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index bf67ec4d4..dcaaec0b4 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -9,18 +9,29 @@ import ( objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" policycore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) error { - cnr, err := p.cnrSrc.Get(objInfo.Address.Container()) + ctx, span := tracing.StartSpanFromContext(ctx, "Policer.ProcessObject", trace.WithAttributes( + attribute.String("address", objInfo.Address.String()), + attribute.Bool("is_linking_object", objInfo.IsLinkingObject), + attribute.Bool("is_ec_part", objInfo.ECInfo != nil), + attribute.String("type", objInfo.Type.String()), + )) + defer span.End() + + cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container()) if err != nil { if client.IsErrContainerNotFound(err) { - existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container()) + existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container()) if errWasRemoved != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved) } else if existed { @@ -37,7 +48,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er policy := cnr.Value.PlacementPolicy() if policycore.IsECPlacement(policy) { - return p.processECContainerObject(ctx, objInfo, policy) + return p.processECContainerObject(ctx, objInfo, cnr.Value) } return p.processRepContainerObject(ctx, objInfo, policy) } @@ -45,7 +56,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { idObj := objInfo.Address.Object() idCnr := objInfo.Address.Container() - nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy) + nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -75,7 +86,7 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc } if !c.needLocalCopy && c.removeLocalCopy { - p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, + p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address), ) @@ -99,6 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe // Number of copies that are stored on maintenance nodes. 
var uncheckedCopies int + var candidates []netmap.NodeInfo for i := 0; shortage > 0 && i < len(nodes); i++ { select { case <-ctx.Done(): @@ -106,71 +118,68 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe default: } - if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) { - requirements.needLocalCopy = true - - shortage-- - } else if nodes[i].IsMaintenance() { - shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies) - } else { - if status := checkedNodes.processStatus(nodes[i]); status.Processed() { - if status == nodeHoldsObject { - // node already contains replica, no need to replicate - nodes = append(nodes[:i], nodes[i+1:]...) - i-- - shortage-- - } - + var err error + st := checkedNodes.processStatus(nodes[i]) + if !st.Processed() { + st, err = p.checkStatus(ctx, addr, nodes[i]) + checkedNodes.set(nodes[i], st) + if st == nodeDoesNotHoldObject { + // 1. This is the first time the node is encountered (`!st.Processed()`). + // 2. The node does not hold object (`st == nodeDoesNotHoldObject`). + // So we need to try to put an object to it. + candidates = append(candidates, nodes[i]) continue } - - callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) - - _, err := p.remoteHeader(callCtx, nodes[i], addr, false) - - cancel() - - if err == nil { - shortage-- - checkedNodes.submitReplicaHolder(nodes[i]) - } else { - if client.IsErrObjectNotFound(err) { - checkedNodes.submitReplicaCandidate(nodes[i]) - continue - } else if client.IsErrNodeUnderMaintenance(err) { - shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies) - } else { - p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, - zap.Stringer("object", addr), - zap.String("error", err.Error()), - ) - } - } } - nodes = append(nodes[:i], nodes[i+1:]...) - i-- + switch st { + case nodeIsLocal: + requirements.needLocalCopy = true + + shortage-- + case nodeIsUnderMaintenance: + shortage-- + uncheckedCopies++ + + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, + zap.String("node", netmap.StringifyPublicKey(nodes[i]))) + case nodeHoldsObject: + shortage-- + case nodeDoesNotHoldObject: + case nodeStatusUnknown: + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, + zap.Stringer("object", addr), + zap.Error(err)) + default: + panic("unreachable") + } } - p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies) + p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies) } -// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values -// -// consider remote nodes under maintenance as problem OK. Such -// nodes MAY not respond with object, however, this is how we -// prevent spam with new replicas. -// However, additional copies should not be removed in this case, -// because we can remove the only copy this way. 
-func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { - checkedNodes.submitReplicaHolder(node) - shortage-- - uncheckedCopies++ +func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) { + if p.netmapKeys.IsLocalKey(node.PublicKey()) { + return nodeIsLocal, nil + } + if node.Status().IsMaintenance() { + return nodeIsUnderMaintenance, nil + } - p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, - zap.String("node", netmap.StringifyPublicKey(node)), - ) - return shortage, uncheckedCopies + callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) + _, err := p.remoteHeader(callCtx, node, addr, false) + cancel() + + if err == nil { + return nodeHoldsObject, nil + } + if client.IsErrObjectNotFound(err) { + return nodeDoesNotHoldObject, nil + } + if client.IsErrNodeUnderMaintenance(err) { + return nodeIsUnderMaintenance, nil + } + return nodeStatusUnknown, err } func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements, @@ -178,7 +187,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address ) { switch { case shortage > 0: - p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, + p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", addr), zap.Uint32("shortage", shortage), ) @@ -194,7 +203,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address case uncheckedCopies > 0: // If we have more copies than needed, but some of them are from the maintenance nodes, // save the local copy. - p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance, + p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance, zap.Int("count", uncheckedCopies)) case uncheckedCopies == 0: diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go index d4c7ccbf9..69879c439 100644 --- a/pkg/services/policer/check_test.go +++ b/pkg/services/policer/check_test.go @@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) { cache.SubmitSuccessfulReplication(node) require.Equal(t, cache.processStatus(node), nodeHoldsObject) - cache.submitReplicaCandidate(node) + cache.set(node, nodeDoesNotHoldObject) require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject) - cache.submitReplicaHolder(node) + cache.set(node, nodeHoldsObject) require.Equal(t, cache.processStatus(node), nodeHoldsObject) } diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index 0a118797d..fbdeb3148 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -10,6 +10,7 @@ import ( objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" @@ -27,18 +28,18 @@ type ecChunkProcessResult struct { var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node") -func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { +func (p *Policer) 
processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { if objInfo.ECInfo == nil { - return p.processECContainerRepObject(ctx, objInfo, policy) + return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy()) } - return p.processECContainerECObject(ctx, objInfo, policy) + return p.processECContainerECObject(ctx, objInfo, cnr) } // processECContainerRepObject processes non erasure coded objects in EC container: tombstones, locks and linking objects. // All of them must be stored on all of the container nodes. func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { objID := objInfo.Address.Object() - nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy) + nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -58,7 +59,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes) if !c.needLocalCopy && c.removeLocalCopy { - p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, + p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address), ) @@ -67,8 +68,8 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec return nil } -func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { - nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, policy) +func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { + nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -85,12 +86,12 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object res := p.processECChunk(ctx, objInfo, nn[0]) if !res.validPlacement { // drop local chunk only if all required chunks are in place - res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0]) + res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr) } - p.adjustECPlacement(ctx, objInfo, nn[0], policy) + p.adjustECPlacement(ctx, objInfo, nn[0], cnr) if res.removeLocal { - p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address)) + p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address)) p.cbRedundantCopy(ctx, objInfo.Address) } return nil @@ -100,15 +101,15 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult { var removeLocalChunk bool requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))] - if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { + if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { // current node is required node, we are happy return ecChunkProcessResult{ validPlacement: true, } } - if requiredNode.IsMaintenance() { + if requiredNode.Status().IsMaintenance() { // consider maintenance 
mode has object, but do not drop local copy - p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) return ecChunkProcessResult{} } @@ -119,7 +120,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n if err == nil { removeLocalChunk = true } else if client.IsErrObjectNotFound(err) { - p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1)) + p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1)) task := replicator.Task{ NumCopies: 1, Addr: objInfo.Address, @@ -128,9 +129,9 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n p.replicator.HandleReplicationTask(ctx, task, newNodeCache()) } else if client.IsErrNodeUnderMaintenance(err) { // consider maintenance mode has object, but do not drop local copy - p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) } else { - p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error())) + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err)) } return ecChunkProcessResult{ @@ -138,20 +139,20 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n } } -func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) bool { +func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool { var parentAddress oid.Address parentAddress.SetContainer(objInfo.Address.Container()) parentAddress.SetObject(objInfo.ECInfo.ParentID) requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo) if len(requiredChunkIndexes) == 0 { - p.log.Info(logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID)) + p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID)) return true } err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes) if err != nil { - p.log.Error(logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress)) + p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress)) return false } if len(requiredChunkIndexes) == 0 { @@ -169,8 +170,9 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I addr.SetContainer(objInfo.Address.Container()) addr.SetObject(indexToObjectID[index]) p.replicator.HandlePullTask(ctx, replicator.Task{ - Addr: addr, - Nodes: candidates, + Addr: addr, + Nodes: candidates, + Container: cnr, }) } // there was some missing chunks, it's not ok @@ -183,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec if uint32(i) == objInfo.ECInfo.Total { break } - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{} } } @@ -208,7 +210,7 @@ func 
(p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool { var eiErr *objectSDK.ECInfoError for _, n := range nodes { - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { continue } _, err := p.remoteHeader(ctx, n, parentAddress, true) @@ -222,11 +224,11 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A var chunkID oid.ID if err := chunkID.ReadFromV2(ch.ID); err != nil { - p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) + p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) return false } if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID { - p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed), + p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed), zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index)) return false } @@ -237,7 +239,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A for index, candidates := range required { if len(candidates) == 0 { - p.log.Error(logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index)) + p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index)) return false } } @@ -245,7 +247,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A return true } -func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, policy netmap.PlacementPolicy) { +func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) { var parentAddress oid.Address parentAddress.SetContainer(objInfo.Address.Container()) parentAddress.SetObject(objInfo.ECInfo.ParentID) @@ -258,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info return } var err error - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { _, err = p.localHeader(ctx, parentAddress) } else { _, err = p.remoteHeader(ctx, n, parentAddress, true) @@ -269,18 +271,20 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info resolved[ch.Index] = append(resolved[ch.Index], n) var ecInfoChunkID oid.ID if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil { - p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) + p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) return } if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID { - p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID), + p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID), zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index)) return } chunkIDs[ch.Index] = ecInfoChunkID } - } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { - 
p.log.Warn(logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) + } else if client.IsErrObjectAlreadyRemoved(err) { + restore = false + } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { + p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) p.replicator.HandleReplicationTask(ctx, replicator.Task{ NumCopies: 1, Addr: objInfo.Address, @@ -292,21 +296,23 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total { return } - if objInfo.ECInfo.Total-uint32(len(resolved)) > policy.ReplicaDescriptor(0).GetECParityCount() { + if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() { var found []uint32 for i := range resolved { found = append(found, i) } - p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found)) + p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found)) return } - p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, policy) + p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr) } -func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, policy netmap.PlacementPolicy) { - c, err := erasurecode.NewConstructor(int(policy.ReplicaDescriptor(0).GetECDataCount()), int(policy.ReplicaDescriptor(0).GetECParityCount())) +func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, + cnr containerSDK.Container, +) { + c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount())) if err != nil { - p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) return } parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs) @@ -315,7 +321,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, } key, err := p.keyStorage.GetKey(nil) if err != nil { - p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) return } required := make([]bool, len(parts)) @@ -325,7 +331,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, } } if err := c.ReconstructParts(parts, required, key); err != nil { - p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) return } for idx, part := range parts { @@ -337,10 +343,11 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo 
objectcore.Info, pID, _ := part.ID() addr.SetObject(pID) targetNode := nodes[idx%len(nodes)] - if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) { + if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) { p.replicator.HandleLocalPutTask(ctx, replicator.Task{ - Addr: addr, - Obj: part, + Addr: addr, + Obj: part, + Container: cnr, }) } else { p.replicator.HandleReplicationTask(ctx, replicator.Task{ @@ -357,8 +364,6 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I parts := make([]*objectSDK.Object, objInfo.ECInfo.Total) errGroup, egCtx := errgroup.WithContext(ctx) for idx, nodes := range existedChunks { - idx := idx - nodes := nodes errGroup.Go(func() error { var objID oid.Address objID.SetContainer(parentAddress.Container()) @@ -366,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I var obj *objectSDK.Object var err error for _, node := range nodes { - if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) { + if p.netmapKeys.IsLocalKey(node.PublicKey()) { obj, err = p.localObject(egCtx, objID) } else { obj, err = p.remoteObject(egCtx, node, objID) @@ -374,7 +379,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I if err == nil { break } - p.log.Warn(logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey()))) + p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey()))) } if obj != nil { parts[idx] = obj @@ -383,7 +388,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I }) } if err := errGroup.Wait(); err != nil { - p.log.Error(logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err)) return nil } return parts diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go index e230153f9..c6980536b 100644 --- a/pkg/services/policer/ec_test.go +++ b/pkg/services/policer/ec_test.go @@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(parentAddress.Container()) { return cnr, nil } @@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if 
id.Equals(parentAddress.Container()) { return cnr, nil } diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index cd47cb0fc..c2157de5d 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -8,6 +8,9 @@ const ( nodeNotProcessed nodeProcessStatus = iota nodeDoesNotHoldObject nodeHoldsObject + nodeStatusUnknown + nodeIsUnderMaintenance + nodeIsLocal ) func (st nodeProcessStatus) Processed() bool { @@ -15,37 +18,19 @@ func (st nodeProcessStatus) Processed() bool { } // nodeCache tracks Policer's check progress. -type nodeCache map[uint64]bool +type nodeCache map[uint64]nodeProcessStatus func newNodeCache() nodeCache { - return make(map[uint64]bool) + return make(map[uint64]nodeProcessStatus) } -func (n nodeCache) set(node netmap.NodeInfo, val bool) { +func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) { n[node.Hash()] = val } -// submits storage node as a candidate to store the object replica in case of -// shortage. -func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) { - n.set(node, false) -} - -// submits storage node as a current object replica holder. -func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) { - n.set(node, true) -} - // processStatus returns current processing status of the storage node. func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { - switch val, ok := n[node.Hash()]; { - case !ok: - return nodeNotProcessed - case val: - return nodeHoldsObject - default: - return nodeDoesNotHoldObject - } + return n[node.Hash()] } // SubmitSuccessfulReplication marks given storage node as a current object @@ -53,5 +38,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { // // SubmitSuccessfulReplication implements replicator.TaskResult. func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) { - n.submitReplicaHolder(node) + n.set(node, nodeHoldsObject) } diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go index 9dbfd8b9f..5d59604c2 100644 --- a/pkg/services/policer/option.go +++ b/pkg/services/policer/option.go @@ -91,7 +91,7 @@ type cfg struct { func defaultCfg() *cfg { return &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), batchSize: 10, cacheSize: 1024, // 1024 * address size = 1024 * 64 B = 64 KiB sleepDuration: 1 * time.Second, @@ -143,7 +143,7 @@ func WithPlacementBuilder(v placement.Builder) Option { } } -// WithRemoteObjectHeader returns option to set remote object header receiver of Policer. +// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer. 
func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option { return func(c *cfg) { c.remoteHeader = v diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go index 363c0b922..c91e7cc7c 100644 --- a/pkg/services/policer/policer.go +++ b/pkg/services/policer/policer.go @@ -1,13 +1,13 @@ package policer import ( + "fmt" "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" - "go.uber.org/zap" ) type objectsInWork struct { @@ -55,12 +55,8 @@ func New(opts ...Option) *Policer { opts[i](c) } - c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))} - cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize)) - if err != nil { - panic(err) - } + assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize)) return &Policer{ cfg: c, diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index e353ea428..049c33753 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "errors" + "slices" "sort" "testing" "time" @@ -36,10 +37,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) { // Container source and bury function buryCh := make(chan oid.Address) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -78,6 +79,7 @@ func TestProcessObject(t *testing.T) { maintenanceNodes []int wantRemoveRedundant bool wantReplicateTo []int + headResult map[int]error ecInfo *objectcore.ECInfo }{ { @@ -127,7 +129,7 @@ func TestProcessObject(t *testing.T) { nodeCount: 2, policy: `REP 2 REP 2`, placement: [][]int{{0, 1}, {0, 1}}, - wantReplicateTo: []int{1, 1}, // is this actually good? 
+ wantReplicateTo: []int{1}, }, { desc: "lock object must be replicated to all nodes", @@ -145,6 +147,14 @@ objHolders: []int{1}, maintenanceNodes: []int{2}, }, + { + desc: "preserve local copy when node responds with MAINTENANCE", + nodeCount: 3, + policy: `REP 2`, + placement: [][]int{{1, 2}}, + objHolders: []int{1}, + headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)}, + }, { desc: "lock object must be replicated to all EC nodes", objType: objectSDK.TypeLock, @@ -161,6 +171,14 @@ placement: [][]int{{0, 1, 2}}, wantReplicateTo: []int{1, 2}, }, + { + desc: "do not remove local copy when MAINTENANCE status is cached", + objType: objectSDK.TypeRegular, + nodeCount: 3, + policy: `REP 1 REP 1`, + placement: [][]int{{1, 2}, {1, 0}}, + headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)}, + }, } for i := range tests { @@ -174,7 +192,7 @@ nodes[i].SetPublicKey([]byte{byte(i)}) } for _, i := range ti.maintenanceNodes { - nodes[i].SetMaintenance() + nodes[i].SetStatus(netmap.Maintenance) } var policy netmap.PlacementPolicy @@ -204,11 +222,14 @@ t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a) return nil, errors.New("unexpected object head") } - for _, i := range ti.objHolders { - if index == i { - return nil, nil + if ti.headResult != nil { + if err, ok := ti.headResult[index]; ok { + return nil, err } } + if slices.Contains(ti.objHolders, index) { + return nil, nil + } return nil, new(apistatus.ObjectNotFound) } @@ -217,14 +238,14 @@ cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(addr.Container()) { return cnr, nil } t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container()) return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -282,10 +303,10 @@ func TestProcessObjectError(t *testing.T) { cnr := &container.Container{} cnr.Value.Init() source := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return nil, new(apistatus.ContainerNotFound) }, } @@ -330,10 +351,10 @@ func TestIteratorContract(t *testing.T) { } containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -422,18 +443,22 @@ func (it *sliceKeySpaceIterator) Rewind() { } type containerSrc struct { - get func(id cid.ID) (*container.Container, error) - deletionInfo func(id cid.ID) (*container.DelInfo, error) + get func(ctx context.Context, id cid.ID) (*container.Container, error) + deletionInfo func(ctx 
context.Context, id cid.ID) (*container.DelInfo, error) } -func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) } +func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) { + return f.get(ctx, id) +} -func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) } +func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + return f.deletionInfo(ctx, id) +} // placementBuilderFunc is a placement.Builder backed by a function type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) -func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { return f(c, o, p) } diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go index a5ebb0010..635a5683b 100644 --- a/pkg/services/policer/process.go +++ b/pkg/services/policer/process.go @@ -7,17 +7,20 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) func (p *Policer) Run(ctx context.Context) { p.shardPolicyWorker(ctx) - p.log.Info(logs.PolicerRoutineStopped) + p.log.Info(ctx, logs.PolicerRoutineStopped) } func (p *Policer) shardPolicyWorker(ctx context.Context) { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String()) for { select { case <-ctx.Done(): @@ -33,7 +36,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit continue } - p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err)) + p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err)) } skipMap := newSkipMap() @@ -59,9 +62,9 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { if p.objsInWork.add(addr.Address) { err := p.processObject(ctx, addr) if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) { - p.log.Error(logs.PolicerUnableToProcessObj, + p.log.Error(ctx, logs.PolicerUnableToProcessObj, zap.Stringer("object", addr.Address), - zap.String("error", err.Error())) + zap.Error(err)) } p.cache.Add(addr.Address, time.Now()) p.objsInWork.remove(addr.Address) @@ -69,7 +72,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { } }) if err != nil { - p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err)) + p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err)) } } } diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go index 3d04b7084..8c6f0df06 100644 --- a/pkg/services/replicator/process.go +++ b/pkg/services/replicator/process.go @@ -5,8 +5,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "go.opentelemetry.io/otel/attribute" @@ -27,7 +26,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T p.metrics.IncInFlightRequest() defer p.metrics.DecInFlightRequest() defer func() { - p.log.Debug(logs.ReplicatorFinishWork, + p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.Uint32("amount of unfinished replicas", task.NumCopies), ) }() @@ -43,16 +42,15 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T var err error task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr) if err != nil { - p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return } } - prm := new(putsvc.RemotePutPrm). + prm := new(objectwriter.RemotePutPrm). WithObject(task.Obj) for i := 0; task.NumCopies > 0 && i < len(task.Nodes); i++ { @@ -65,7 +63,6 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T log := p.log.With( zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])), zap.Stringer("object", task.Addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) callCtx, cancel := context.WithTimeout(ctx, p.putTimeout) @@ -75,11 +72,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T cancel() if err != nil { - log.Error(logs.ReplicatorCouldNotReplicateObject, - zap.String("error", err.Error()), + log.Error(ctx, logs.ReplicatorCouldNotReplicateObject, + zap.Error(err), ) } else { - log.Debug(logs.ReplicatorObjectSuccessfullyReplicated) + log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated) task.NumCopies-- diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go index d178700f6..216fe4919 100644 --- a/pkg/services/replicator/pull.go +++ b/pkg/services/replicator/pull.go @@ -3,11 +3,12 @@ package replicator import ( "context" "errors" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" @@ -21,7 +22,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { p.metrics.IncInFlightRequest() defer p.metrics.DecInFlightRequest() defer func() { - p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull")) + p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull")) }() ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask", @@ -42,31 +43,24 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { if err == nil { break } - var endpoints []string - node.IterateNetworkEndpoints(func(s string) bool { - endpoints = append(endpoints, s) - return false - }) - p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage, + endpoints := slices.Collect(node.NetworkEndpoints()) + p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), zap.Error(err), - zap.Strings("endpoints", endpoints), - zap.String("trace_id", 
tracingPkg.GetTraceID(ctx))) + zap.Strings("endpoints", endpoints)) } if obj == nil { - p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), - zap.Error(errFailedToGetObjectFromAnyNode), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(errFailedToGetObjectFromAnyNode)) return } - err := engine.Put(ctx, p.localStorage, obj) + err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container)) if err != nil { - p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } } diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go index c06ec3f65..bcad8471d 100644 --- a/pkg/services/replicator/put.go +++ b/pkg/services/replicator/put.go @@ -5,8 +5,8 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -19,7 +19,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { p.metrics.IncInFlightRequest() defer p.metrics.DecInFlightRequest() defer func() { - p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull")) + p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull")) }() ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask", @@ -30,18 +30,16 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { defer span.End() if task.Obj == nil { - p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(errObjectNotDefined), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(errObjectNotDefined)) return } - err := engine.Put(ctx, p.localStorage, task.Obj) + err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container)) if err != nil { - p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } } diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go index a67f2e766..a940cef37 100644 --- a/pkg/services/replicator/replicator.go +++ b/pkg/services/replicator/replicator.go @@ -4,10 +4,9 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) // Replicator represents the utility that replicates @@ -24,7 +23,7 @@ type cfg struct { log *logger.Logger - remoteSender *putsvc.RemoteSender + remoteSender 
*objectwriter.RemoteSender remoteGetter *getsvc.RemoteGetter @@ -45,8 +44,6 @@ func New(opts ...Option) *Replicator { opts[i](c) } - c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))} - return &Replicator{ cfg: c, } @@ -67,7 +64,7 @@ func WithLogger(v *logger.Logger) Option { } // WithRemoteSender returns option to set remote object sender of Replicator. -func WithRemoteSender(v *putsvc.RemoteSender) Option { +func WithRemoteSender(v *objectwriter.RemoteSender) Option { return func(c *cfg) { c.remoteSender = v } diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go index d2b5b2506..a03f8dcaa 100644 --- a/pkg/services/replicator/task.go +++ b/pkg/services/replicator/task.go @@ -1,6 +1,7 @@ package replicator import ( + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -16,4 +17,6 @@ type Task struct { Obj *objectSDK.Object // Nodes is a list of potential object holders. Nodes []netmap.NodeInfo + + Container containerSDK.Container } diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index 76c220fab..f0591de71 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -4,10 +4,10 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "go.uber.org/zap" ) @@ -33,10 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log } func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - s.log.Debug(logs.ServingRequest, - zap.String("component", "SessionService"), - zap.String("request", "Create"), - ) + s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create")) respBody, err := s.exec.Create(ctx, req.GetBody()) if err != nil { diff --git a/pkg/services/session/server.go b/pkg/services/session/server.go index 9e44ae667..e8555a7c9 100644 --- a/pkg/services/session/server.go +++ b/pkg/services/session/server.go @@ -3,7 +3,7 @@ package session import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) // Server is an interface of the FrostFS API Session service server. 
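Note on the replicator/task.go hunk above: Task now carries the container alongside the object address, so HandlePullTask and HandleLocalPutTask can derive the indexing flag for engine.Put via IsIndexedContainer instead of resolving the container again. A minimal sketch of the intended call shape on the Policer side, assuming `cnr` was already fetched from the container source (variable names here are illustrative, not part of the patch):

	// Schedule a pull of a missing EC chunk; the container travels with the
	// task so the local put can decide whether the object must be indexed.
	p.replicator.HandlePullTask(ctx, replicator.Task{
		Addr:      addr,       // address of the missing chunk
		Nodes:     candidates, // nodes expected to hold the chunk
		Container: cnr,        // consumed via containerCore.IsIndexedContainer(task.Container)
	})
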
diff --git a/pkg/services/session/sign.go b/pkg/services/session/sign.go index 690fff896..3664c1403 100644 --- a/pkg/services/session/sign.go +++ b/pkg/services/session/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) type signService struct { diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go index 21f55a7d1..ea0233f9a 100644 --- a/pkg/services/session/storage/persistent/executor.go +++ b/pkg/services/session/storage/persistent/executor.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.etcd.io/bbolt" diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go index 39cb14359..f80ecb591 100644 --- a/pkg/services/session/storage/persistent/executor_test.go +++ b/pkg/services/session/storage/persistent/executor_test.go @@ -8,8 +8,8 @@ import ( "path/filepath" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/stretchr/testify/require" @@ -39,7 +39,7 @@ func TestTokenStore(t *testing.T) { tokens := make([]tok, 0, tokenNumber) - for i := 0; i < tokenNumber; i++ { + for i := range tokenNumber { req.SetExpiration(uint64(i)) res, err := ts.Create(context.Background(), req) diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go index 411734ea1..60db97f90 100644 --- a/pkg/services/session/storage/persistent/options.go +++ b/pkg/services/session/storage/persistent/options.go @@ -19,7 +19,7 @@ type Option func(*cfg) func defaultCfg() *cfg { return &cfg{ - l: &logger.Logger{Logger: zap.L()}, + l: logger.NewLoggerWrapper(zap.L()), timeout: 100 * time.Millisecond, } } diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go index 71711e371..132d62445 100644 --- a/pkg/services/session/storage/persistent/storage.go +++ b/pkg/services/session/storage/persistent/storage.go @@ -1,6 +1,7 @@ package persistent import ( + "context" "crypto/aes" "crypto/cipher" "encoding/hex" @@ -63,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) { // enable encryption if it // was configured so if cfg.privateKey != nil { - rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8) + rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8) cfg.privateKey.D.FillBytes(rawKey) c, err := aes.NewCipher(rawKey) @@ -105,7 +106,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok return err }) if err != nil { - s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage, + s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage, zap.Error(err), zap.Stringer("ownerID", 
ownerID), zap.String("tokenID", hex.EncodeToString(tokenID)), @@ -130,7 +131,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) { if epochFromToken(v) <= epoch { err = c.Delete() if err != nil { - s.l.Error(logs.PersistentCouldNotDeleteSToken, + s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken, zap.String("token_id", hex.EncodeToString(k)), ) } @@ -141,7 +142,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) { }) }) if err != nil { - s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens, + s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens, zap.Uint64("epoch", epoch), ) } diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go index cd498709c..423e579d7 100644 --- a/pkg/services/session/storage/temporary/executor.go +++ b/pkg/services/session/storage/temporary/executor.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) s.mtx.Lock() s.tokens[key{ tokenID: base58.Encode(uidBytes), - ownerID: base58.Encode(id.WalletBytes()), + ownerID: id.EncodeToString(), }] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration()) s.mtx.Unlock() diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go index ee93dee71..c9da6b842 100644 --- a/pkg/services/session/storage/temporary/storage.go +++ b/pkg/services/session/storage/temporary/storage.go @@ -9,7 +9,9 @@ import ( ) type key struct { + // nolint:unused tokenID string + // nolint:unused ownerID string } @@ -39,7 +41,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken s.mtx.RLock() t := s.tokens[key{ tokenID: base58.Encode(tokenID), - ownerID: base58.Encode(ownerID.WalletBytes()), + ownerID: ownerID.EncodeToString(), }] s.mtx.RUnlock() diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index 6e78bf4ec..58757ff6d 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -2,44 +2,27 @@ package tree import ( "context" - "crypto/ecdsa" "encoding/hex" - "errors" "fmt" "net" "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common" nativeschema 
"git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "google.golang.org/grpc/peer" ) -var ( - errInvalidTargetType = errors.New("bearer token defines non-container target override") - errBearerExpired = errors.New("bearer token has expired") - errBearerInvalidSignature = errors.New("bearer token has invalid signature") - errBearerInvalidContainerID = errors.New("bearer token was created for another container") - errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner") - errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender") -) - func (s *Service) newAPERequest(ctx context.Context, namespace string, - cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) (aperequest.Request, error) { schemaMethod, err := converter.SchemaMethodFromACLOperation(operation) if err != nil { @@ -53,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()), nativeschema.PropertyKeyActorRole: schemaRole, } - reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey) + reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey) if err != nil { return aperequest.Request{}, err } @@ -70,65 +53,19 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString()) } + resProps := map[string]string{ + nativeschema.ProperyKeyTreeID: treeID, + } + return aperequest.NewRequest( schemaMethod, - aperequest.NewResource(resourceName, make(map[string]string)), + aperequest.NewResource(resourceName, resProps), reqProps, ), nil } -// isValidBearer checks whether bearer token was correctly signed by authorized -// entity. This method might be defined on whole ACL service because it will -// require fetching current epoch to check lifetime. -func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error { - if token == nil { - return nil - } - - // First check token lifetime. Simplest verification. - if token.InvalidAt(st.CurrentEpoch()) { - return errBearerExpired - } - - // Then check if bearer token is signed correctly. - if !token.VerifySignature() { - return errBearerInvalidSignature - } - - // Check for ape overrides defined in the bearer token. - apeOverride := token.APEOverride() - if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { - return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) - } - - // Then check if container is either empty or equal to the container in the request. - var targetCnr cid.ID - err := targetCnr.DecodeString(apeOverride.Target.Name) - if err != nil { - return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) - } - if !cntID.Equals(targetCnr) { - return errBearerInvalidContainerID - } - - // Then check if container owner signed this token. - if !bearer.ResolveIssuer(*token).Equals(ownerCnr) { - return errBearerNotSignedByOwner - } - - // Then check if request sender has rights to use this token. 
- var usrSender user.ID - user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) - - if !token.AssertUser(usrSender) { - return errBearerInvalidOwner - } - - return nil -} - func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, - container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) error { namespace := "" cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns") @@ -136,58 +73,27 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, namespace = cntNamespace } - request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey) + request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey) if err != nil { - return apeErr(err) + return fmt.Errorf("failed to create ape request: %w", err) } - var cr engine.ChainRouter - if bt != nil && !bt.Impersonate() { - if err := isValidBearer(bt, container.Value.Owner(), cid, publicKey, s.state); err != nil { - return fmt.Errorf("bearer validation error: %w", err) - } - cr, err = router.BearerChainFeedRouter(s.localOverrideStorage, s.morphChainStorage, bt.APEOverride()) - if err != nil { - return fmt.Errorf("create chain router error: %w", err) - } - } else { - cr = engine.NewDefaultChainRouterWithLocalOverrides(s.morphChainStorage, s.localOverrideStorage) - } - - groups, err := aperequest.Groups(s.frostfsidSubjectProvider, publicKey) - if err != nil { - return fmt.Errorf("failed to get group ids: %w", err) - } - - // Policy contract keeps group related chains as namespace-group pair. - for i := range groups { - groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) - } - - rt := engine.NewRequestTargetExtended(namespace, cid.EncodeToString(), fmt.Sprintf("%s:%s", namespace, publicKey.Address()), groups) - status, found, err := cr.IsAllowed(apechain.Ingress, rt, request) - if err != nil { - return apeErr(err) - } - if found && status == apechain.Allow { - return nil - } - err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", request.Operation(), status.String()) - return apeErr(err) -} - -func apeErr(err error) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(err.Error()) - return errAccessDenied + return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{ + Request: request, + Namespace: namespace, + Container: cid, + ContainerOwner: container.Value.Owner(), + PublicKey: publicKey, + BearerToken: bt, + }) } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. 
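The `checkAPE` rewrite above delegates bearer-token validation and chain routing to the shared checker in `pkg/services/common/ape`, and `newAPERequest` now attaches the tree ID as a resource property. That is what makes per-tree policy rules expressible. A sketch of such a rule, built from the same policy-engine types the new test file below exercises (container ID and tree ID values are illustrative):

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)

// denyTreeRead builds a policy chain that denies GetObject for one specific
// tree in one container; the treeID condition is exactly what the new
// resource property enables.
func denyTreeRead(containerID, treeID string) *chain.Chain {
	return &chain.Chain{
		MatchType: chain.MatchTypeFirstMatch,
		Rules: []chain.Rule{{
			Status:  chain.AccessDenied,
			Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
			Resources: chain.Resources{
				Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
			},
			Condition: []chain.Condition{{
				Op:    chain.CondStringEquals,
				Kind:  chain.KindResource,
				Key:   nativeschema.ProperyKeyTreeID, // sic: constant name in the native schema
				Value: treeID,
			}},
		}},
	}
}

func main() {
	c := denyTreeRead("73tQ...", "version") // illustrative placeholder values
	fmt.Println(len(c.Rules), "rule(s)")
}
```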
-func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { +func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey) if err != nil { return reqProps, err } diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go new file mode 100644 index 000000000..7b209fd47 --- /dev/null +++ b/pkg/services/tree/ape_test.go @@ -0,0 +1,246 @@ +package tree + +import ( + "context" + "encoding/hex" + "fmt" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" + core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +var ( + containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy" + + senderPrivateKey, _ = keys.NewPrivateKey() + + senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes()) + + rootCnr = &core.Container{Value: containerSDK.Container{}} +) + +type frostfsIDProviderMock struct { + subjects map[util.Uint160]*client.Subject + subjectsExtended map[util.Uint160]*client.SubjectExtended +} + +func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { + v, ok := f.subjects[key] + if !ok { + return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) + } + return v, nil +} + +func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { + v, ok := f.subjectsExtended[key] + if !ok { + return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) + } + return v, nil +} + +var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil) + +func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock { + return &frostfsIDProviderMock{ + subjects: map[util.Uint160]*client.Subject{ + scriptHashFromSenderKey(t, senderKey): { + Namespace: "testnamespace", + Name: "test", + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + }, + }, + subjectsExtended: map[util.Uint160]*client.SubjectExtended{ + scriptHashFromSenderKey(t, senderKey): { + Namespace: "testnamespace", + Name: "test", + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + Groups: []*client.Group{ + { + ID: 1, + Name: "test", + Namespace: "testnamespace", + KV: map[string]string{ + "attr1": "value1", + "attr2": "value2", + }, + }, + }, + }, + }, + } +} + +func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 { + pk, 
err := keys.NewPublicKeyFromString(senderKey) + require.NoError(t, err) + return pk.GetScriptHash() +} + +type stMock struct{} + +func (m *stMock) CurrentEpoch() uint64 { + return 8 +} + +func TestCheckAPE(t *testing.T) { + cid := cid.ID{} + _ = cid.DecodeString(containerID) + + t.Run("treeID rule", func(t *testing.T) { + los := inmemory.NewInmemoryLocalStorage() + mcs := inmemory.NewInmemoryMorphRuleChainStorage() + fid := newFrostfsIDProviderMock(t) + s := Service{ + cfg: cfg{ + frostfsidSubjectProvider: fid, + }, + apeChecker: checkercore.New(los, mcs, fid, &stMock{}), + } + + mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.QuotaLimitReached, + Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + Condition: []chain.Condition{ + { + Op: chain.CondStringEquals, + Kind: chain.KindResource, + Key: nativeschema.ProperyKeyTreeID, + Value: versionTreeID, + }, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey()) + + var chErr *checkercore.ChainRouterError + require.ErrorAs(t, err, &chErr) + require.Equal(t, chain.QuotaLimitReached, chErr.Status()) + }) + + t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) { + los := inmemory.NewInmemoryLocalStorage() + mcs := inmemory.NewInmemoryMorphRuleChainStorage() + fid := newFrostfsIDProviderMock(t) + s := Service{ + cfg: cfg{ + frostfsidSubjectProvider: fid, + }, + apeChecker: checkercore.New(los, mcs, fid, &stMock{}), + } + + los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.AccessDenied, + Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + Condition: []chain.Condition{ + { + Op: chain.CondStringNotEquals, + Kind: chain.KindResource, + Key: nativeschema.PropertyKeyObjectType, + Value: "TOMBSTONE", + }, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.Allow, + Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) + require.NoError(t, err) + }) + + t.Run("delete rule won't affect tree add", func(t *testing.T) { + los := inmemory.NewInmemoryLocalStorage() + mcs := inmemory.NewInmemoryMorphRuleChainStorage() + fid := newFrostfsIDProviderMock(t) + s := Service{ + cfg: cfg{ + frostfsidSubjectProvider: fid, + }, + apeChecker: checkercore.New(los, mcs, fid, &stMock{}), + } + + los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.AccessDenied, + Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}}, + Resources: chain.Resources{ + Names: 
[]string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.Allow, + Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + Condition: []chain.Condition{ + { + Op: chain.CondStringNotEquals, + Kind: chain.KindResource, + Key: nativeschema.PropertyKeyObjectType, + Value: "TOMBSTONE", + }, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) + require.NoError(t, err) + }) +} diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index 1be1c2f83..a11700771 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -2,23 +2,24 @@ package tree import ( "context" + "crypto/ecdsa" "errors" "fmt" "sync" "time" + internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "github.com/hashicorp/golang-lru/v2/simplelru" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" ) type clientCache struct { sync.Mutex simplelru.LRU[string, cacheItem] + key *ecdsa.PrivateKey + ds *internalNet.DialerSource } type cacheItem struct { @@ -34,18 +35,20 @@ const ( var errRecentlyFailed = errors.New("client has recently failed") -func (c *clientCache) init() { +func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) { l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) { if conn := value.cc; conn != nil { _ = conn.Close() } }) c.LRU = *l + c.key = pk + c.ds = ds } func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) { c.Lock() - ccInt, ok := c.LRU.Get(netmapAddr) + ccInt, ok := c.Get(netmapAddr) c.Unlock() if ok { @@ -63,14 +66,19 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl } } - cc, err := dialTreeService(ctx, netmapAddr) + var netAddr network.Address + if err := netAddr.FromString(netmapAddr); err != nil { + return nil, err + } + + cc, err := dialTreeService(ctx, netAddr, c.key, c.ds) lastTry := time.Now() c.Lock() if err != nil { - c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) } else { - c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) } c.Unlock() @@ -80,32 +88,3 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl return NewTreeServiceClient(cc), nil } - -func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) { - var netAddr network.Address - if err := netAddr.FromString(netmapAddr); err != nil { - return nil, err - } - - opts := []grpc.DialOption{ - grpc.WithBlock(), - grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), - ), - grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), - 
tracing.NewStreamClientInterceptor(), - ), - } - - if !netAddr.IsTLSEnabled() { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...) - cancel() - - return cc, err -} diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go index 435257550..c641a21a2 100644 --- a/pkg/services/tree/container.go +++ b/pkg/services/tree/container.go @@ -2,6 +2,7 @@ package tree import ( "bytes" + "context" "crypto/sha256" "fmt" "sync" @@ -32,13 +33,13 @@ type containerCacheItem struct { const defaultContainerCacheSize = 10 // getContainerNodes returns nodes in the container and a position of local key in the list. -func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { - nm, err := s.nmSource.GetNetMap(0) +func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { + nm, err := s.nmSource.GetNetMap(ctx, 0) if err != nil { return nil, -1, fmt.Errorf("can't get netmap: %w", err) } - cnr, err := s.cnrSource.Get(cid) + cnr, err := s.cnrSource.Get(ctx, cid) if err != nil { return nil, -1, fmt.Errorf("can't get container: %w", err) } diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go index 677431889..e7a13827e 100644 --- a/pkg/services/tree/getsubtree_test.go +++ b/pkg/services/tree/getsubtree_test.go @@ -62,7 +62,7 @@ func TestGetSubTree(t *testing.T) { loop: for i := 1; i < len(acc.seen); i++ { parent := acc.seen[i].Body.ParentId - for j := 0; j < i; j++ { + for j := range i { if acc.seen[j].Body.NodeId[0] == parent[0] { continue loop } @@ -131,7 +131,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) { t.Run("boltdb forest", func(t *testing.T) { p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))) require.NoError(t, p.Open(context.Background(), 0o644)) - require.NoError(t, p.Init()) + require.NoError(t, p.Init(context.Background())) testGetSubTreeOrderAsc(t, p) }) } diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go index 0f0e4ee57..07503f8c3 100644 --- a/pkg/services/tree/metrics.go +++ b/pkg/services/tree/metrics.go @@ -6,6 +6,7 @@ type MetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) + AddOperation(string, string) } type defaultMetricsRegister struct{} @@ -13,3 +14,4 @@ type defaultMetricsRegister struct{} func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {} +func (defaultMetricsRegister) AddOperation(string, string) {} diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 6a20fe5cc..56cbcc081 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -1,9 +1,12 @@ package tree import ( + "context" "crypto/ecdsa" + "sync/atomic" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -17,12 +20,12 @@ import ( type ContainerSource interface { container.Source - DeletionInfo(cid.ID) (*container.DelInfo, error) + 
DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) // List must return list of all the containers in the FrostFS network // at the moment of a call and any error that does not allow fetching // container information. - List() ([]cid.ID, error) + List(ctx context.Context) ([]cid.ID, error) } type cfg struct { @@ -33,19 +36,20 @@ type cfg struct { nmSource netmap.Source cnrSource ContainerSource frostfsidSubjectProvider frostfsidcore.SubjectProvider - eaclSource container.EACLSource forest pilorama.Forest // replication-related parameters replicatorChannelCapacity int replicatorWorkerCount int replicatorTimeout time.Duration containerCacheSize int - authorizedKeys [][]byte + authorizedKeys atomic.Pointer[[][]byte] + syncBatchSize int localOverrideStorage policyengine.LocalOverrideStorage morphChainStorage policyengine.MorphRuleChainStorageReader metrics MetricsRegister + ds *net.DialerSource } // Option represents configuration option for a tree service. @@ -65,14 +69,6 @@ func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option } } -// WithEACLSource sets a eACL table source for a tree service. -// This option is required. -func WithEACLSource(src container.EACLSource) Option { - return func(c *cfg) { - c.eaclSource = src - } -} - // WithNetmapSource sets a netmap source for a tree service. // This option is required. func WithNetmapSource(src netmap.Source) Option { @@ -120,6 +116,12 @@ func WithReplicationWorkerCount(n int) Option { } } +func WithSyncBatchSize(n int) Option { + return func(c *cfg) { + c.syncBatchSize = n + } +} + func WithContainerCacheSize(n int) Option { return func(c *cfg) { if n > 0 { @@ -146,10 +148,7 @@ func WithMetrics(v MetricsRegister) Option { // keys that have rights to use Tree service. 
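The `authorizedKeys` field above changes from a plain `[][]byte` to `atomic.Pointer[[][]byte]`, which is what lets `ReloadAuthorizedKeys` (added further down in service.go) swap the whole key list at runtime without taking a mutex on the read path: writers publish a fresh slice, readers load a consistent snapshot. A standalone sketch of that pattern, with hypothetical names:

```go
package main

import (
	"bytes"
	"fmt"
	"sync/atomic"
)

// keySet demonstrates the snapshot pattern the tree service adopts for its
// authorized-key list.
type keySet struct {
	keys atomic.Pointer[[][]byte]
}

func newKeySet() *keySet {
	s := &keySet{}
	s.keys.Store(&[][]byte{}) // never leave the pointer nil, as New() also ensures
	return s
}

// Reload publishes a whole new list; in-flight readers keep their old snapshot.
func (s *keySet) Reload(newKeys [][]byte) {
	cp := make([][]byte, len(newKeys))
	copy(cp, newKeys)
	s.keys.Store(&cp)
}

// Authorized is the lock-free hot path.
func (s *keySet) Authorized(pub []byte) bool {
	for _, k := range *s.keys.Load() {
		if bytes.Equal(k, pub) {
			return true
		}
	}
	return false
}

func main() {
	s := newKeySet()
	fmt.Println(s.Authorized([]byte{1})) // false
	s.Reload([][]byte{{1}})
	fmt.Println(s.Authorized([]byte{1})) // true
}
```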
func WithAuthorizedKeys(keys keys.PublicKeys) Option { return func(c *cfg) { - c.authorizedKeys = nil - for _, key := range keys { - c.authorizedKeys = append(c.authorizedKeys, key.Bytes()) - } + c.authorizedKeys.Store(fromPublicKeys(keys)) } } @@ -170,3 +169,9 @@ func WithNetmapState(state netmap.State) Option { c.state = state } } + +func WithDialerSource(ds *net.DialerSource) Option { + return func(c *cfg) { + c.ds = ds + } +} diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go new file mode 100644 index 000000000..8f21686df --- /dev/null +++ b/pkg/services/tree/qos.go @@ -0,0 +1,101 @@ +package tree + +import ( + "context" + + "google.golang.org/grpc" +) + +var _ TreeServiceServer = (*ioTagAdjust)(nil) + +type AdjustIOTag interface { + AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context +} + +type ioTagAdjust struct { + s TreeServiceServer + a AdjustIOTag +} + +func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer { + return &ioTagAdjust{ + s: s, + a: a, + } +} + +func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Add(ctx, req) +} + +func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.AddByPath(ctx, req) +} + +func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Apply(ctx, req) +} + +func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.GetNodeByPath(ctx, req) +} + +func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { + ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) + return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{ + sender: srv, + ServerStream: srv, + ctxF: func() context.Context { return ctx }, + }) +} + +func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { + ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) + return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{ + sender: srv, + ServerStream: srv, + ctxF: func() context.Context { return ctx }, + }) +} + +func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Healthcheck(ctx, req) +} + +func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Move(ctx, req) +} + +func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Remove(ctx, req) +} + +func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.TreeList(ctx, req) +} + +type qosSend[T any] interface { + Send(T) error +} + +type qosServerWrapper[T any] struct { + grpc.ServerStream + sender qosSend[T] + ctxF func() context.Context +} + +func (w *qosServerWrapper[T]) Send(resp T) error 
{ + return w.sender.Send(resp) +} + +func (w *qosServerWrapper[T]) Context() context.Context { + return w.ctxF() +} diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index ec41a60d4..647f8cb30 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -6,19 +6,32 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" + "google.golang.org/grpc" ) var errNoSuitableNode = errors.New("no node was found to execute the request") +func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { + var resp *Resp + var outErr error + err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = callback(c, fCtx, req) + return true + }) + if err != nil { + return nil, err + } + return resp, outErr +} + // forEachNode executes callback for each node in the container until true is returned. // Returns errNoSuitableNode if there was no successful attempt to dial any node. -func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error { +func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error { for _, n := range cntNodes { if bytes.Equal(n.PublicKey(), s.rawPub) { return nil @@ -28,25 +41,15 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo var called bool for _, n := range cntNodes { var stop bool - n.IterateNetworkEndpoints(func(endpoint string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", - trace.WithAttributes( - attribute.String("endpoint", endpoint), - )) - defer span.End() - - c, err := s.cache.get(ctx, endpoint) - if err != nil { - return false + for endpoint := range n.NetworkEndpoints() { + stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool { + called = true + return f(fCtx, c) + }) + if called { + break } - - s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - - called = true - stop = f(c) - return true - }) + } if stop { return nil } @@ -56,3 +59,19 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo } return nil } + +func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", + trace.WithAttributes( + attribute.String("endpoint", endpoint), + )) + defer span.End() + + c, err := s.cache.get(ctx, endpoint) + if err != nil { + return false + } + + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) + return f(ctx, c) +} diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 346198b3c..ee40884eb 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -40,6 +39,7 @@ const ( defaultReplicatorCapacity = 64 defaultReplicatorWorkerCount = 64 defaultReplicatorSendTimeout = time.Second * 5 + defaultSyncBatchSize = 1000 ) func (s *Service) localReplicationWorker(ctx context.Context) { @@ -57,8 +57,8 @@ func (s *Service) localReplicationWorker(ctx context.Context) { err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false) if err != nil { - s.log.Error(logs.TreeFailedToApplyReplicatedOperation, - zap.String("err", err.Error())) + s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation, + zap.Error(err)) } span.End() } @@ -89,41 +89,23 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req var lastErr error var lastAddr string - n.IterateNetworkEndpoints(func(addr string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - attribute.String("address", addr), - ), - ) - defer span.End() - + for addr := range n.NetworkEndpoints() { lastAddr = addr - - c, err := s.cache.get(ctx, addr) - if err != nil { - lastErr = fmt.Errorf("can't create client: %w", err) - return false + lastErr = s.apply(ctx, n, addr, req) + if lastErr == nil { + break } - - ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) - _, lastErr = c.Apply(ctx, req) - cancel() - - return lastErr == nil - }) + } if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { - s.log.Debug(logs.TreeDoNotSendUpdateToTheNode, - zap.String("last_error", lastErr.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode, + zap.String("last_error", lastErr.Error())) } else { - s.log.Warn(logs.TreeFailedToSentUpdateToTheNode, + s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode, zap.String("last_error", lastErr.Error()), zap.String("address", lastAddr), - zap.String("key", hex.EncodeToString(n.PublicKey())), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("key", hex.EncodeToString(n.PublicKey()))) } s.metrics.AddReplicateTaskDuration(time.Since(start), false) return lastErr @@ -132,8 +114,28 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req return nil } +func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(n.PublicKey())), + attribute.String("address", addr), + ), + ) + defer span.End() + + c, err := s.cache.get(ctx, addr) + if err != nil { + return fmt.Errorf("can't create client: %w", err) + } + + ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) + _, err = c.Apply(ctx, req) + cancel() + return err +} + func (s *Service) replicateLoop(ctx context.Context) { - for i := 0; i < s.replicatorWorkerCount; i++ { + for range s.replicatorWorkerCount { go s.replicationWorker(ctx) go s.localReplicationWorker(ctx) } @@ -151,10 +153,10 @@ func (s *Service) replicateLoop(ctx context.Context) { return case op := <-s.replicateCh: start 
:= time.Now() - err := s.replicate(op) + err := s.replicate(ctx, op) if err != nil { - s.log.Error(logs.TreeErrorDuringReplication, - zap.String("err", err.Error()), + s.log.Error(ctx, logs.TreeErrorDuringReplication, + zap.Error(err), zap.Stringer("cid", op.cid), zap.String("treeID", op.treeID)) } @@ -163,14 +165,14 @@ func (s *Service) replicateLoop(ctx context.Context) { } } -func (s *Service) replicate(op movePair) error { +func (s *Service) replicate(ctx context.Context, op movePair) error { req := newApplyRequest(&op) err := SignMessage(req, s.key) if err != nil { return fmt.Errorf("can't sign data: %w", err) } - nodes, localIndex, err := s.getContainerNodes(op.cid) + nodes, localIndex, err := s.getContainerNodes(ctx, op.cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -204,7 +206,7 @@ func newApplyRequest(op *movePair) *ApplyRequest { TreeId: op.treeID, Operation: &LogMove{ ParentId: op.op.Parent, - Meta: op.op.Meta.Bytes(), + Meta: op.op.Bytes(), ChildId: op.op.Child, }, }, diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 2012f53d2..3994d6973 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -5,15 +5,19 @@ import ( "context" "errors" "fmt" - "sort" + "slices" "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -37,6 +41,8 @@ type Service struct { initialSyncDone atomic.Bool + apeChecker checkercore.CheckCore + // cnrMap contains existing (used) container IDs. cnrMap map[cidSDK.ID]struct{} // cnrMapMtx protects cnrMap @@ -52,17 +58,19 @@ func New(opts ...Option) *Service { s.replicatorChannelCapacity = defaultReplicatorCapacity s.replicatorWorkerCount = defaultReplicatorWorkerCount s.replicatorTimeout = defaultReplicatorSendTimeout + s.syncBatchSize = defaultSyncBatchSize s.metrics = defaultMetricsRegister{} + s.authorizedKeys.Store(&[][]byte{}) for i := range opts { opts[i](&s.cfg) } if s.log == nil { - s.log = &logger.Logger{Logger: zap.NewNop()} + s.log = logger.NewLoggerWrapper(zap.NewNop()) } - s.cache.init() + s.cache.init(s.key, s.ds) s.closeCh = make(chan struct{}) s.replicateCh = make(chan movePair, s.replicatorChannelCapacity) s.replicateLocalCh = make(chan applyOp) @@ -72,11 +80,14 @@ func New(opts ...Option) *Service { s.syncChan = make(chan struct{}) s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount) + s.apeChecker = checkercore.New(s.localOverrideStorage, s.morphChainStorage, s.frostfsidSubjectProvider, s.state) + return &s } // Start starts the service. 
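The replicator hunks above replace the `IterateNetworkEndpoints(func(string) bool)` callback with `for addr := range n.NetworkEndpoints()`: the SDK method is now a Go 1.23 range-over-func iterator, so a plain `break` replaces returning a bool from a closure. A self-contained sketch with a hypothetical node type standing in for the SDK's `netmap.NodeInfo`:

```go
package main

import (
	"fmt"
	"iter"
)

// nodeInfo is a hypothetical stand-in for the SDK's netmap.NodeInfo.
type nodeInfo struct{ endpoints []string }

// NetworkEndpoints returns a Go 1.23 range-over-func iterator, mirroring
// the SDK method shape the diff switches to.
func (n nodeInfo) NetworkEndpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, e := range n.endpoints {
			if !yield(e) {
				return // consumer broke out of the loop
			}
		}
	}
}

func main() {
	n := nodeInfo{endpoints: []string{"addr1:8080", "addr2:8080"}}
	// Old style: n.IterateNetworkEndpoints(func(addr string) bool { ... })
	// New style: plain for-range; `break` replaces the bool return value.
	for addr := range n.NetworkEndpoints() {
		fmt.Println("trying", addr)
		break // stop after the first success, as apply() in the hunk does
	}
}
```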
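In the service.go hunks that follow, the repeated "relay to another container node" boilerplate in each unary RPC collapses into the generic `relayUnary`, invoked with method expressions such as `(TreeServiceClient).Add` — a method rendered as an ordinary function whose first argument is the receiver. A toy sketch of that shape (names are illustrative, not the generated client):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc"
)

// toyClient stands in for the generated TreeServiceClient interface.
type toyClient interface {
	Add(ctx context.Context, req *string, opts ...grpc.CallOption) (*int, error)
}

type impl struct{}

func (impl) Add(_ context.Context, req *string, _ ...grpc.CallOption) (*int, error) {
	n := len(*req)
	return &n, nil
}

// relay mirrors the shape of relayUnary: one generic helper serves every
// request/response pair, and the callback is a method expression.
func relay[Req, Resp any](ctx context.Context, c toyClient, req *Req,
	callback func(toyClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error),
) (*Resp, error) {
	if c == nil {
		return nil, errors.New("no client")
	}
	return callback(c, ctx, req)
}

func main() {
	req := "hello"
	// (toyClient).Add has type func(toyClient, context.Context, *string, ...grpc.CallOption) (*int, error).
	resp, err := relay(context.Background(), impl{}, &req, (toyClient).Add)
	fmt.Println(*resp, err) // 5 <nil>
}
```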
func (s *Service) Start(ctx context.Context) { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) go s.replicateLoop(ctx) go s.syncLoop(ctx) @@ -96,6 +107,7 @@ func (s *Service) Shutdown() { } func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { + defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -107,26 +119,17 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } if pos < 0 { - var resp *AddResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.Add(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).Add) } d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} @@ -148,6 +151,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error } func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { + defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -159,26 +163,17 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } if pos < 0 { - var resp *AddByPathResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.AddByPath(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).AddByPath) } meta := protoToMeta(b.GetMeta()) @@ -212,6 +207,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP } func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { + defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -223,26 +219,17 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } if pos < 0 { - var resp *RemoveResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.Remove(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).Remove) } if b.GetNodeId() == pilorama.RootID { @@ -265,6 +252,7 @@ func (s 
*Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon // Move applies client operation to the specified tree and pushes in queue // for replication on other nodes. func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { + defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -276,26 +264,17 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } if pos < 0 { - var resp *MoveResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.Move(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).Move) } if b.GetNodeId() == pilorama.RootID { @@ -317,6 +296,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er } func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { + defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -328,26 +308,17 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } if pos < 0 { - var resp *GetNodeByPathResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.GetNodeByPath(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).GetNodeByPath) } attr := b.GetPathAttribute() @@ -360,7 +331,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - info := make([]*GetNodeByPathResponse_Info, 0, len(nodes)) + info := make([]GetNodeByPathResponse_Info, 0, len(nodes)) for _, node := range nodes { m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node) if err != nil { @@ -374,21 +345,18 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) if b.GetAllAttributes() { x.Meta = metaToProto(m.Items) } else { - var metaValue []*KeyValue + var metaValue []KeyValue for _, kv := range m.Items { - for _, attr := range b.GetAttributes() { - if kv.Key == attr { - metaValue = append(metaValue, &KeyValue{ - Key: kv.Key, - Value: kv.Value, - }) - break - } + if slices.Contains(b.GetAttributes(), kv.Key) { + metaValue = append(metaValue, KeyValue{ + Key: kv.Key, + Value: kv.Value, + }) } } x.Meta = metaValue } - info = append(info, &x) + info = append(info, x) } return &GetNodeByPathResponse{ @@ -399,6 +367,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } func (s *Service) GetSubTree(req *GetSubTreeRequest, srv 
TreeService_GetSubTreeServer) error { + defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -410,20 +379,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return err } - err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(srv.Context(), cid) if err != nil { return err } if pos < 0 { var cli TreeService_GetSubTreeClient var outErr error - err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetSubTree(srv.Context(), req) + err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetSubTree(fCtx, req) return true }) if err != nil { @@ -445,7 +414,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS type stackItem struct { values []pilorama.MultiNodeInfo parent pilorama.MultiNode - last *string + last *pilorama.Cursor } func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { @@ -469,10 +438,8 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid } if ms == nil { ms = m.Items - } else { - if len(m.Items) != 1 { - return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") - } + } else if len(m.Items) != 1 { + return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") } ts = append(ts, m.Time) ps = append(ps, p) @@ -496,14 +463,13 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid break } - nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) + var err error + item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) if err != nil { return err } - item.values = nodes - item.last = last - if len(nodes) == 0 { + if len(item.values) == 0 { stack = stack[:len(stack)-1] continue } @@ -615,10 +581,9 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di if len(nodes) == 0 { return nodes, nil } - less := func(i, j int) bool { - return bytes.Compare(nodes[i].Meta.GetAttr(pilorama.AttributeFilename), nodes[j].Meta.GetAttr(pilorama.AttributeFilename)) < 0 - } - sort.Slice(nodes, less) + slices.SortFunc(nodes, func(a, b pilorama.NodeInfo) int { + return bytes.Compare(a.Meta.GetAttr(pilorama.AttributeFilename), b.Meta.GetAttr(pilorama.AttributeFilename)) + }) return nodes, nil default: return nil, fmt.Errorf("unsupported order direction: %s", d.String()) @@ -626,7 +591,8 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di } // Apply locally applies operation from the remote node to the tree. 
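`sortByFilename` above moves from `sort.Slice` with a `less()` closure to `slices.SortFunc` with a three-way comparator — which `bytes.Compare` already provides — avoiding `sort.Slice`'s reflection-based swapping. (Neither is stable; `slices.SortStableFunc` exists if that ever matters.) A minimal sketch of the same comparator shape:

```go
package main

import (
	"bytes"
	"fmt"
	"slices"
)

// nodeInfo is a cut-down stand-in for pilorama.NodeInfo.
type nodeInfo struct {
	filename []byte
}

func main() {
	nodes := []nodeInfo{
		{filename: []byte("b.txt")},
		{filename: []byte("a.txt")},
		{filename: []byte("c.txt")},
	}
	// slices.SortFunc takes a three-way comparator (negative/zero/positive),
	// exactly what bytes.Compare returns.
	slices.SortFunc(nodes, func(a, b nodeInfo) int {
		return bytes.Compare(a.filename, b.filename)
	})
	for _, n := range nodes {
		fmt.Println(string(n.filename)) // a.txt, b.txt, c.txt
	}
}
```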
-func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) { +func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { + defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx)) err := verifyMessage(req) if err != nil { return nil, err @@ -639,7 +605,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e key := req.GetSignature().GetKey() - _, pos, _, err := s.getContainerInfo(cid, key) + _, pos, _, err := s.getContainerInfo(ctx, cid, key) if err != nil { return nil, err } @@ -670,6 +636,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e } func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { + defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -681,15 +648,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) return err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(srv.Context(), cid) if err != nil { return err } if pos < 0 { var cli TreeService_GetOpLogClient var outErr error - err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetOpLog(srv.Context(), req) + err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetOpLog(fCtx, req) return true }) if err != nil { @@ -720,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) Body: &GetOpLogResponse_Body{ Operation: &LogMove{ ParentId: lm.Parent, - Meta: lm.Meta.Bytes(), + Meta: lm.Bytes(), ChildId: lm.Child, }, }, @@ -734,6 +701,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) } func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { + defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -753,21 +721,12 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } if pos < 0 { - var resp *TreeListResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.TreeList(ctx, req) - return outErr == nil - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).TreeList) } ids, err := s.forest.TreeList(ctx, cid) @@ -782,21 +741,19 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList }, nil } -func protoToMeta(arr []*KeyValue) []pilorama.KeyValue { +func protoToMeta(arr []KeyValue) []pilorama.KeyValue { meta := make([]pilorama.KeyValue, len(arr)) for i, kv := range arr { - if kv != nil { - meta[i].Key = kv.GetKey() - meta[i].Value = kv.GetValue() - } + meta[i].Key = kv.GetKey() + meta[i].Value = kv.GetValue() } return meta } -func metaToProto(arr []pilorama.KeyValue) []*KeyValue { - meta := make([]*KeyValue, len(arr)) +func metaToProto(arr []pilorama.KeyValue) []KeyValue { + meta := make([]KeyValue, len(arr)) for i, kv := range arr { - meta[i] = &KeyValue{ + meta[i] = KeyValue{ Key: kv.Key, Value: kv.Value, } @@ -806,8 +763,8 @@ func metaToProto(arr []pilorama.KeyValue) []*KeyValue { // getContainerInfo returns the list of 
container nodes, position in the container for the node // with pub key and total amount of nodes in all replicas. -func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { - cntNodes, _, err := s.getContainerNodes(cid) +func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { + cntNodes, _, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, 0, 0, err } @@ -827,3 +784,15 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec return new(HealthcheckResponse), nil } + +func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) { + s.authorizedKeys.Store(fromPublicKeys(newKeys)) +} + +func fromPublicKeys(keys keys.PublicKeys) *[][]byte { + buff := make([][]byte, len(keys)) + for i, k := range keys { + buff[i] = k.Bytes() + } + return &buff +} diff --git a/pkg/services/tree/service.pb.go b/pkg/services/tree/service.pb.go deleted file mode 100644 index f439e3f28..000000000 --- a/pkg/services/tree/service.pb.go +++ /dev/null @@ -1,3587 +0,0 @@ -//* -// Service for working with CRDT tree. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.0 -// source: pkg/services/tree/service.proto - -package tree - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type GetSubTreeRequest_Body_Order_Direction int32 - -const ( - GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0 - GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1 -) - -// Enum value maps for GetSubTreeRequest_Body_Order_Direction. -var ( - GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{ - 0: "None", - 1: "Asc", - } - GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{ - "None": 0, - "Asc": 1, - } -) - -func (x GetSubTreeRequest_Body_Order_Direction) Enum() *GetSubTreeRequest_Body_Order_Direction { - p := new(GetSubTreeRequest_Body_Order_Direction) - *p = x - return p -} - -func (x GetSubTreeRequest_Body_Order_Direction) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GetSubTreeRequest_Body_Order_Direction) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_services_tree_service_proto_enumTypes[0].Descriptor() -} - -func (GetSubTreeRequest_Body_Order_Direction) Type() protoreflect.EnumType { - return &file_pkg_services_tree_service_proto_enumTypes[0] -} - -func (x GetSubTreeRequest_Body_Order_Direction) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GetSubTreeRequest_Body_Order_Direction.Descriptor instead. -func (GetSubTreeRequest_Body_Order_Direction) EnumDescriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0, 0, 0} -} - -type AddRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *AddRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *AddRequest) Reset() { - *x = AddRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddRequest) ProtoMessage() {} - -func (x *AddRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddRequest.ProtoReflect.Descriptor instead. -func (*AddRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0} -} - -func (x *AddRequest) GetBody() *AddRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *AddRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type AddResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *AddResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *AddResponse) Reset() { - *x = AddResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddResponse) ProtoMessage() {} - -func (x *AddResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddResponse.ProtoReflect.Descriptor instead. -func (*AddResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1} -} - -func (x *AddResponse) GetBody() *AddResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *AddResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type AddByPathRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *AddByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *AddByPathRequest) Reset() { - *x = AddByPathRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddByPathRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddByPathRequest) ProtoMessage() {} - -func (x *AddByPathRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddByPathRequest.ProtoReflect.Descriptor instead. -func (*AddByPathRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2} -} - -func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *AddByPathRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type AddByPathResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *AddByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *AddByPathResponse) Reset() { - *x = AddByPathResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddByPathResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddByPathResponse) ProtoMessage() {} - -func (x *AddByPathResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddByPathResponse.ProtoReflect.Descriptor instead. -func (*AddByPathResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3} -} - -func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *AddByPathResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type RemoveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *RemoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *RemoveRequest) Reset() { - *x = RemoveRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveRequest) ProtoMessage() {} - -func (x *RemoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveRequest.ProtoReflect.Descriptor instead. -func (*RemoveRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4} -} - -func (x *RemoveRequest) GetBody() *RemoveRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *RemoveRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type RemoveResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *RemoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *RemoveResponse) Reset() { - *x = RemoveResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveResponse) ProtoMessage() {} - -func (x *RemoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveResponse.ProtoReflect.Descriptor instead. -func (*RemoveResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5} -} - -func (x *RemoveResponse) GetBody() *RemoveResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *RemoveResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type MoveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *MoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *MoveRequest) Reset() { - *x = MoveRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MoveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveRequest) ProtoMessage() {} - -func (x *MoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveRequest.ProtoReflect.Descriptor instead. -func (*MoveRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6} -} - -func (x *MoveRequest) GetBody() *MoveRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *MoveRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type MoveResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *MoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *MoveResponse) Reset() { - *x = MoveResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MoveResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveResponse) ProtoMessage() {} - -func (x *MoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveResponse.ProtoReflect.Descriptor instead. -func (*MoveResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7} -} - -func (x *MoveResponse) GetBody() *MoveResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *MoveResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type GetNodeByPathRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *GetNodeByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetNodeByPathRequest) Reset() { - *x = GetNodeByPathRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNodeByPathRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNodeByPathRequest) ProtoMessage() {} - -func (x *GetNodeByPathRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNodeByPathRequest.ProtoReflect.Descriptor instead. -func (*GetNodeByPathRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8} -} - -func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetNodeByPathRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type GetNodeByPathResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *GetNodeByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetNodeByPathResponse) Reset() { - *x = GetNodeByPathResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNodeByPathResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNodeByPathResponse) ProtoMessage() {} - -func (x *GetNodeByPathResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNodeByPathResponse.ProtoReflect.Descriptor instead. -func (*GetNodeByPathResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9} -} - -func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetNodeByPathResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type GetSubTreeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *GetSubTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetSubTreeRequest) Reset() { - *x = GetSubTreeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSubTreeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSubTreeRequest) ProtoMessage() {} - -func (x *GetSubTreeRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSubTreeRequest.ProtoReflect.Descriptor instead. -func (*GetSubTreeRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10} -} - -func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetSubTreeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type GetSubTreeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *GetSubTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetSubTreeResponse) Reset() { - *x = GetSubTreeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSubTreeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSubTreeResponse) ProtoMessage() {} - -func (x *GetSubTreeResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSubTreeResponse.ProtoReflect.Descriptor instead. -func (*GetSubTreeResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11} -} - -func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetSubTreeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type TreeListRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *TreeListRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *TreeListRequest) Reset() { - *x = TreeListRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TreeListRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TreeListRequest) ProtoMessage() {} - -func (x *TreeListRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TreeListRequest.ProtoReflect.Descriptor instead. -func (*TreeListRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12} -} - -func (x *TreeListRequest) GetBody() *TreeListRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *TreeListRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type TreeListResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *TreeListResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *TreeListResponse) Reset() { - *x = TreeListResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TreeListResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TreeListResponse) ProtoMessage() {} - -func (x *TreeListResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TreeListResponse.ProtoReflect.Descriptor instead. -func (*TreeListResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13} -} - -func (x *TreeListResponse) GetBody() *TreeListResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *TreeListResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type ApplyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *ApplyRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *ApplyRequest) Reset() { - *x = ApplyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApplyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApplyRequest) ProtoMessage() {} - -func (x *ApplyRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead. -func (*ApplyRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14} -} - -func (x *ApplyRequest) GetBody() *ApplyRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *ApplyRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type ApplyResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *ApplyResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *ApplyResponse) Reset() { - *x = ApplyResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApplyResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApplyResponse) ProtoMessage() {} - -func (x *ApplyResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApplyResponse.ProtoReflect.Descriptor instead. -func (*ApplyResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15} -} - -func (x *ApplyResponse) GetBody() *ApplyResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *ApplyResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type GetOpLogRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *GetOpLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetOpLogRequest) Reset() { - *x = GetOpLogRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOpLogRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOpLogRequest) ProtoMessage() {} - -func (x *GetOpLogRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOpLogRequest.ProtoReflect.Descriptor instead. -func (*GetOpLogRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16} -} - -func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetOpLogRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type GetOpLogResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *GetOpLogResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *GetOpLogResponse) Reset() { - *x = GetOpLogResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOpLogResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOpLogResponse) ProtoMessage() {} - -func (x *GetOpLogResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOpLogResponse.ProtoReflect.Descriptor instead. -func (*GetOpLogResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17} -} - -func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *GetOpLogResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type HealthcheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Response body. - Body *HealthcheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Response signature. 
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *HealthcheckResponse) Reset() { - *x = HealthcheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthcheckResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthcheckResponse) ProtoMessage() {} - -func (x *HealthcheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthcheckResponse.ProtoReflect.Descriptor instead. -func (*HealthcheckResponse) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18} -} - -func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *HealthcheckResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type HealthcheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request body. - Body *HealthcheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` - // Request signature. - Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` -} - -func (x *HealthcheckRequest) Reset() { - *x = HealthcheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthcheckRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthcheckRequest) ProtoMessage() {} - -func (x *HealthcheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthcheckRequest.ProtoReflect.Descriptor instead. -func (*HealthcheckRequest) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19} -} - -func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body { - if x != nil { - return x.Body - } - return nil -} - -func (x *HealthcheckRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type AddRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // ID of the parent to attach node to. - ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` - // Key-Value pairs with meta information. 
- Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"` - // Bearer token in V2 format. - BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"` -} - -func (x *AddRequest_Body) Reset() { - *x = AddRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddRequest_Body) ProtoMessage() {} - -func (x *AddRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddRequest_Body.ProtoReflect.Descriptor instead. -func (*AddRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *AddRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *AddRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *AddRequest_Body) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} - -func (x *AddRequest_Body) GetMeta() []*KeyValue { - if x != nil { - return x.Meta - } - return nil -} - -func (x *AddRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} - -type AddResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID of the created node. - NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` -} - -func (x *AddResponse_Body) Reset() { - *x = AddResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddResponse_Body) ProtoMessage() {} - -func (x *AddResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddResponse_Body.ProtoReflect.Descriptor instead. -func (*AddResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1, 0} -} - -func (x *AddResponse_Body) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} - -type AddByPathRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // Attribute to build path with. Default: "FileName". 
- PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"` - // List of path components. - Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"` - // Node meta-information. - Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"` - // Bearer token in V2 format. - BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"` -} - -func (x *AddByPathRequest_Body) Reset() { - *x = AddByPathRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddByPathRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddByPathRequest_Body) ProtoMessage() {} - -func (x *AddByPathRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddByPathRequest_Body.ProtoReflect.Descriptor instead. -func (*AddByPathRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *AddByPathRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *AddByPathRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *AddByPathRequest_Body) GetPathAttribute() string { - if x != nil { - return x.PathAttribute - } - return "" -} - -func (x *AddByPathRequest_Body) GetPath() []string { - if x != nil { - return x.Path - } - return nil -} - -func (x *AddByPathRequest_Body) GetMeta() []*KeyValue { - if x != nil { - return x.Meta - } - return nil -} - -func (x *AddByPathRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} - -type AddByPathResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // List of all created nodes. The first one is the leaf. - Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes,proto3" json:"nodes,omitempty"` - // ID of the parent node where new nodes were attached. - ParentId uint64 `protobuf:"varint,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` -} - -func (x *AddByPathResponse_Body) Reset() { - *x = AddByPathResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddByPathResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddByPathResponse_Body) ProtoMessage() {} - -func (x *AddByPathResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddByPathResponse_Body.ProtoReflect.Descriptor instead. 
-func (*AddByPathResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *AddByPathResponse_Body) GetNodes() []uint64 { - if x != nil { - return x.Nodes - } - return nil -} - -func (x *AddByPathResponse_Body) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} - -type RemoveRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // ID of the node to remove. - NodeId uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - // Bearer token in V2 format. - BearerToken []byte `protobuf:"bytes,4,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"` -} - -func (x *RemoveRequest_Body) Reset() { - *x = RemoveRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveRequest_Body) ProtoMessage() {} - -func (x *RemoveRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveRequest_Body.ProtoReflect.Descriptor instead. -func (*RemoveRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *RemoveRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *RemoveRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *RemoveRequest_Body) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} - -func (x *RemoveRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} - -type RemoveResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RemoveResponse_Body) Reset() { - *x = RemoveResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveResponse_Body) ProtoMessage() {} - -func (x *RemoveResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveResponse_Body.ProtoReflect.Descriptor instead. 
-func (*RemoveResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5, 0} -} - -type MoveRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // TODO import neo.fs.v2.refs.ContainerID directly. - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // ID of the new parent. - ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` - // ID of the node to move. - NodeId uint64 `protobuf:"varint,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - // Node meta-information. - Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"` - // Bearer token in V2 format. - BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"` -} - -func (x *MoveRequest_Body) Reset() { - *x = MoveRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MoveRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveRequest_Body) ProtoMessage() {} - -func (x *MoveRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveRequest_Body.ProtoReflect.Descriptor instead. 
-func (*MoveRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *MoveRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *MoveRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *MoveRequest_Body) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} - -func (x *MoveRequest_Body) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} - -func (x *MoveRequest_Body) GetMeta() []*KeyValue { - if x != nil { - return x.Meta - } - return nil -} - -func (x *MoveRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} - -type MoveResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *MoveResponse_Body) Reset() { - *x = MoveResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MoveResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveResponse_Body) ProtoMessage() {} - -func (x *MoveResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveResponse_Body.ProtoReflect.Descriptor instead. -func (*MoveResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7, 0} -} - -type GetNodeByPathRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // Attribute to build path with. Default: "FileName". - PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"` - // List of path components. - Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"` - // List of attributes to include in response. - Attributes []string `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty"` - // Flag to return only the latest version of node. - LatestOnly bool `protobuf:"varint,6,opt,name=latest_only,json=latestOnly,proto3" json:"latest_only,omitempty"` - // Flag to return all stored attributes. - AllAttributes bool `protobuf:"varint,7,opt,name=all_attributes,json=allAttributes,proto3" json:"all_attributes,omitempty"` - // Bearer token in V2 format. 
- BearerToken []byte `protobuf:"bytes,8,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"` -} - -func (x *GetNodeByPathRequest_Body) Reset() { - *x = GetNodeByPathRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNodeByPathRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNodeByPathRequest_Body) ProtoMessage() {} - -func (x *GetNodeByPathRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNodeByPathRequest_Body.ProtoReflect.Descriptor instead. -func (*GetNodeByPathRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8, 0} -} - -func (x *GetNodeByPathRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *GetNodeByPathRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *GetNodeByPathRequest_Body) GetPathAttribute() string { - if x != nil { - return x.PathAttribute - } - return "" -} - -func (x *GetNodeByPathRequest_Body) GetPath() []string { - if x != nil { - return x.Path - } - return nil -} - -func (x *GetNodeByPathRequest_Body) GetAttributes() []string { - if x != nil { - return x.Attributes - } - return nil -} - -func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool { - if x != nil { - return x.LatestOnly - } - return false -} - -func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool { - if x != nil { - return x.AllAttributes - } - return false -} - -func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} - -// Information about a single tree node. -type GetNodeByPathResponse_Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Node ID. - NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - // Timestamp of the last operation with the node. - Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Node meta-information. - Meta []*KeyValue `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty"` - // Parent ID. 
- ParentId uint64 `protobuf:"varint,4,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` -} - -func (x *GetNodeByPathResponse_Info) Reset() { - *x = GetNodeByPathResponse_Info{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNodeByPathResponse_Info) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNodeByPathResponse_Info) ProtoMessage() {} - -func (x *GetNodeByPathResponse_Info) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNodeByPathResponse_Info.ProtoReflect.Descriptor instead. -func (*GetNodeByPathResponse_Info) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 0} -} - -func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} - -func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -func (x *GetNodeByPathResponse_Info) GetMeta() []*KeyValue { - if x != nil { - return x.Meta - } - return nil -} - -func (x *GetNodeByPathResponse_Info) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} - -type GetNodeByPathResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // List of nodes stored by path. - Nodes []*GetNodeByPathResponse_Info `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` -} - -func (x *GetNodeByPathResponse_Body) Reset() { - *x = GetNodeByPathResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNodeByPathResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNodeByPathResponse_Body) ProtoMessage() {} - -func (x *GetNodeByPathResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNodeByPathResponse_Body.ProtoReflect.Descriptor instead. -func (*GetNodeByPathResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 1} -} - -func (x *GetNodeByPathResponse_Body) GetNodes() []*GetNodeByPathResponse_Info { - if x != nil { - return x.Nodes - } - return nil -} - -type GetSubTreeRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // IDs of the root nodes of a subtree forest. 
- RootId []uint64 `protobuf:"varint,3,rep,name=root_id,json=rootId,proto3" json:"root_id,omitempty"` - // Optional depth of the traversal. Zero means return only root. - // Maximum depth is 10. - Depth uint32 `protobuf:"varint,4,opt,name=depth,proto3" json:"depth,omitempty"` - // Bearer token in V2 format. - BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"` - // Result ordering. - OrderBy *GetSubTreeRequest_Body_Order `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` -} - -func (x *GetSubTreeRequest_Body) Reset() { - *x = GetSubTreeRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSubTreeRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSubTreeRequest_Body) ProtoMessage() {} - -func (x *GetSubTreeRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSubTreeRequest_Body.ProtoReflect.Descriptor instead. -func (*GetSubTreeRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0} -} - -func (x *GetSubTreeRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *GetSubTreeRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *GetSubTreeRequest_Body) GetRootId() []uint64 { - if x != nil { - return x.RootId - } - return nil -} - -func (x *GetSubTreeRequest_Body) GetDepth() uint32 { - if x != nil { - return x.Depth - } - return 0 -} - -func (x *GetSubTreeRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} - -func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order { - if x != nil { - return x.OrderBy - } - return nil -} - -type GetSubTreeRequest_Body_Order struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Direction GetSubTreeRequest_Body_Order_Direction `protobuf:"varint,1,opt,name=direction,proto3,enum=tree.GetSubTreeRequest_Body_Order_Direction" json:"direction,omitempty"` -} - -func (x *GetSubTreeRequest_Body_Order) Reset() { - *x = GetSubTreeRequest_Body_Order{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSubTreeRequest_Body_Order) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSubTreeRequest_Body_Order) ProtoMessage() {} - -func (x *GetSubTreeRequest_Body_Order) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSubTreeRequest_Body_Order.ProtoReflect.Descriptor instead. 
-func (*GetSubTreeRequest_Body_Order) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0, 0} -} - -func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction { - if x != nil { - return x.Direction - } - return GetSubTreeRequest_Body_Order_None -} - -type GetSubTreeResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID of the node. - NodeId []uint64 `protobuf:"varint,1,rep,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - // ID of the parent. - ParentId []uint64 `protobuf:"varint,2,rep,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` - // Time node was first added to a tree. - Timestamp []uint64 `protobuf:"varint,3,rep,name=timestamp,proto3" json:"timestamp,omitempty"` - // Node meta-information. - Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"` -} - -func (x *GetSubTreeResponse_Body) Reset() { - *x = GetSubTreeResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSubTreeResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSubTreeResponse_Body) ProtoMessage() {} - -func (x *GetSubTreeResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSubTreeResponse_Body.ProtoReflect.Descriptor instead. -func (*GetSubTreeResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11, 0} -} - -func (x *GetSubTreeResponse_Body) GetNodeId() []uint64 { - if x != nil { - return x.NodeId - } - return nil -} - -func (x *GetSubTreeResponse_Body) GetParentId() []uint64 { - if x != nil { - return x.ParentId - } - return nil -} - -func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *GetSubTreeResponse_Body) GetMeta() []*KeyValue { - if x != nil { - return x.Meta - } - return nil -} - -type TreeListRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` -} - -func (x *TreeListRequest_Body) Reset() { - *x = TreeListRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TreeListRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TreeListRequest_Body) ProtoMessage() {} - -func (x *TreeListRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TreeListRequest_Body.ProtoReflect.Descriptor instead. 
-func (*TreeListRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12, 0} -} - -func (x *TreeListRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -type TreeListResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Tree IDs. - Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` -} - -func (x *TreeListResponse_Body) Reset() { - *x = TreeListResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TreeListResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TreeListResponse_Body) ProtoMessage() {} - -func (x *TreeListResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TreeListResponse_Body.ProtoReflect.Descriptor instead. -func (*TreeListResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13, 0} -} - -func (x *TreeListResponse_Body) GetIds() []string { - if x != nil { - return x.Ids - } - return nil -} - -type ApplyRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // Operation to be applied. - Operation *LogMove `protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"` -} - -func (x *ApplyRequest_Body) Reset() { - *x = ApplyRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApplyRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApplyRequest_Body) ProtoMessage() {} - -func (x *ApplyRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApplyRequest_Body.ProtoReflect.Descriptor instead. 
-func (*ApplyRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14, 0} -} - -func (x *ApplyRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *ApplyRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *ApplyRequest_Body) GetOperation() *LogMove { - if x != nil { - return x.Operation - } - return nil -} - -type ApplyResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ApplyResponse_Body) Reset() { - *x = ApplyResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApplyResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApplyResponse_Body) ProtoMessage() {} - -func (x *ApplyResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApplyResponse_Body.ProtoReflect.Descriptor instead. -func (*ApplyResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15, 0} -} - -type GetOpLogRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container ID in V2 format. - ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // The name of the tree. - TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - // Starting height to return logs from. - Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - // Amount of operations to return. - Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` -} - -func (x *GetOpLogRequest_Body) Reset() { - *x = GetOpLogRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOpLogRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOpLogRequest_Body) ProtoMessage() {} - -func (x *GetOpLogRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOpLogRequest_Body.ProtoReflect.Descriptor instead. 
-func (*GetOpLogRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16, 0} -} - -func (x *GetOpLogRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} - -func (x *GetOpLogRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} - -func (x *GetOpLogRequest_Body) GetHeight() uint64 { - if x != nil { - return x.Height - } - return 0 -} - -func (x *GetOpLogRequest_Body) GetCount() uint64 { - if x != nil { - return x.Count - } - return 0 -} - -type GetOpLogResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Operation on a tree. - Operation *LogMove `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` -} - -func (x *GetOpLogResponse_Body) Reset() { - *x = GetOpLogResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetOpLogResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetOpLogResponse_Body) ProtoMessage() {} - -func (x *GetOpLogResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetOpLogResponse_Body.ProtoReflect.Descriptor instead. -func (*GetOpLogResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17, 0} -} - -func (x *GetOpLogResponse_Body) GetOperation() *LogMove { - if x != nil { - return x.Operation - } - return nil -} - -type HealthcheckResponse_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *HealthcheckResponse_Body) Reset() { - *x = HealthcheckResponse_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthcheckResponse_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthcheckResponse_Body) ProtoMessage() {} - -func (x *HealthcheckResponse_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthcheckResponse_Body.ProtoReflect.Descriptor instead. 
-func (*HealthcheckResponse_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18, 0} -} - -type HealthcheckRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *HealthcheckRequest_Body) Reset() { - *x = HealthcheckRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_service_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthcheckRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthcheckRequest_Body) ProtoMessage() {} - -func (x *HealthcheckRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_service_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthcheckRequest_Body.ProtoReflect.Descriptor instead. -func (*HealthcheckRequest_Body) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19, 0} -} - -var File_pkg_services_tree_service_proto protoreflect.FileDescriptor - -var file_pkg_services_tree_service_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, - 0x72, 0x65, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x04, 0x74, 0x72, 0x65, 0x65, 0x1a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x02, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, - 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, - 0xa6, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, - 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, - 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x64, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, - 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x0b, 0x41, 0x64, 0x64, - 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, - 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x1a, 0x1f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6e, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, - 0x64, 0x65, 0x49, 0x64, 0x22, 0xb9, 0x02, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, - 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, - 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x04, 0x42, 0x6f, - 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25, - 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, - 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, - 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0xaf, 0x01, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42, - 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, - 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, - 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x39, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x6e, 
0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x05, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x22, 0xec, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, - 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, - 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x76, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xaa, 0x02, 0x0a, 0x0b, 0x4d, 0x6f, - 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, - 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x1a, 0xbf, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 
0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, - 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, - 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, - 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x72, 0x0a, 0x0c, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x6f, 0x76, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, - 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x85, 0x03, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, - 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, - 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, - 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x88, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, - 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x73, - 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x5f, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 
0x20, 0x01, 0x28, 0x08, - 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x22, 0xbc, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, - 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, - 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65, - 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, - 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, - 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x64, 0x1a, 0x3e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x6e, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, - 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, - 0x73, 0x22, 0xc3, 0x03, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, - 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xcc, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64, - 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, - 0x07, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, - 
0x10, 0x00, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, - 0x70, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, - 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0x3d, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x42, 0x79, 0x1a, 0x73, 0x0a, 0x05, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x09, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, - 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x4f, 0x72, 0x64, - 0x65, 0x72, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1e, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x41, 0x73, 0x63, 0x10, 0x01, 0x22, 0x83, 0x02, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, - 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, - 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, - 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x1a, 0x8a, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x1b, 0x0a, 0x07, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x00, 0x52, 0x06, - 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x00, 0x52, 0x08, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x00, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, - 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0x9b, 0x01, - 0x0a, 0x0f, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x12, 0x2d, 0x0a, 
0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x1a, 0x29, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x10, - 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xdb, 0x01, 0x0a, 0x0c, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x62, - 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, - 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, - 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, - 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x6f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x09, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x09, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x74, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, - 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, - 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe2, - 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4f, 0x70, 
0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x1a, 0x70, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, - 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, - 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, - 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, - 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, - 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x33, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, - 0x12, 0x2b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, - 0x76, 0x65, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x80, 0x01, - 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, - 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, - 0x22, 0x7e, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, - 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, - 0x32, 0xd6, 0x04, 0x0a, 0x0b, 0x54, 0x72, 0x65, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x2a, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x10, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, - 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65, - 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x09, - 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, - 0x2e, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, - 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x13, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x72, 0x65, 0x65, - 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2d, 0x0a, 0x04, 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, - 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x72, 0x65, - 0x65, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, - 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, - 0x1a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, - 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x72, - 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, - 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x12, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x08, 0x54, - 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, - 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12, - 0x12, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, - 0x70, 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, - 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x74, 0x72, - 
0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x12, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, - 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, - 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, - 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_pkg_services_tree_service_proto_rawDescOnce sync.Once - file_pkg_services_tree_service_proto_rawDescData = file_pkg_services_tree_service_proto_rawDesc -) - -func file_pkg_services_tree_service_proto_rawDescGZIP() []byte { - file_pkg_services_tree_service_proto_rawDescOnce.Do(func() { - file_pkg_services_tree_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_service_proto_rawDescData) - }) - return file_pkg_services_tree_service_proto_rawDescData -} - -var file_pkg_services_tree_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_pkg_services_tree_service_proto_msgTypes = make([]protoimpl.MessageInfo, 42) -var file_pkg_services_tree_service_proto_goTypes = []interface{}{ - (GetSubTreeRequest_Body_Order_Direction)(0), // 0: tree.GetSubTreeRequest.Body.Order.Direction - (*AddRequest)(nil), // 1: tree.AddRequest - (*AddResponse)(nil), // 2: tree.AddResponse - (*AddByPathRequest)(nil), // 3: tree.AddByPathRequest - (*AddByPathResponse)(nil), // 4: tree.AddByPathResponse - (*RemoveRequest)(nil), // 5: tree.RemoveRequest - (*RemoveResponse)(nil), // 6: tree.RemoveResponse - (*MoveRequest)(nil), // 7: tree.MoveRequest - (*MoveResponse)(nil), // 8: tree.MoveResponse - (*GetNodeByPathRequest)(nil), // 9: tree.GetNodeByPathRequest - (*GetNodeByPathResponse)(nil), // 10: tree.GetNodeByPathResponse - (*GetSubTreeRequest)(nil), // 11: tree.GetSubTreeRequest - (*GetSubTreeResponse)(nil), // 12: tree.GetSubTreeResponse - (*TreeListRequest)(nil), // 13: tree.TreeListRequest - (*TreeListResponse)(nil), // 14: tree.TreeListResponse - (*ApplyRequest)(nil), // 15: tree.ApplyRequest - (*ApplyResponse)(nil), // 16: tree.ApplyResponse - (*GetOpLogRequest)(nil), // 17: tree.GetOpLogRequest - (*GetOpLogResponse)(nil), // 18: tree.GetOpLogResponse - (*HealthcheckResponse)(nil), // 19: tree.HealthcheckResponse - (*HealthcheckRequest)(nil), // 20: tree.HealthcheckRequest - (*AddRequest_Body)(nil), // 21: tree.AddRequest.Body - (*AddResponse_Body)(nil), // 22: tree.AddResponse.Body - (*AddByPathRequest_Body)(nil), // 23: tree.AddByPathRequest.Body - (*AddByPathResponse_Body)(nil), // 24: tree.AddByPathResponse.Body - (*RemoveRequest_Body)(nil), // 25: tree.RemoveRequest.Body - (*RemoveResponse_Body)(nil), // 26: tree.RemoveResponse.Body - (*MoveRequest_Body)(nil), // 27: tree.MoveRequest.Body - (*MoveResponse_Body)(nil), // 28: tree.MoveResponse.Body - (*GetNodeByPathRequest_Body)(nil), // 29: tree.GetNodeByPathRequest.Body - (*GetNodeByPathResponse_Info)(nil), // 30: 
tree.GetNodeByPathResponse.Info - (*GetNodeByPathResponse_Body)(nil), // 31: tree.GetNodeByPathResponse.Body - (*GetSubTreeRequest_Body)(nil), // 32: tree.GetSubTreeRequest.Body - (*GetSubTreeRequest_Body_Order)(nil), // 33: tree.GetSubTreeRequest.Body.Order - (*GetSubTreeResponse_Body)(nil), // 34: tree.GetSubTreeResponse.Body - (*TreeListRequest_Body)(nil), // 35: tree.TreeListRequest.Body - (*TreeListResponse_Body)(nil), // 36: tree.TreeListResponse.Body - (*ApplyRequest_Body)(nil), // 37: tree.ApplyRequest.Body - (*ApplyResponse_Body)(nil), // 38: tree.ApplyResponse.Body - (*GetOpLogRequest_Body)(nil), // 39: tree.GetOpLogRequest.Body - (*GetOpLogResponse_Body)(nil), // 40: tree.GetOpLogResponse.Body - (*HealthcheckResponse_Body)(nil), // 41: tree.HealthcheckResponse.Body - (*HealthcheckRequest_Body)(nil), // 42: tree.HealthcheckRequest.Body - (*Signature)(nil), // 43: tree.Signature - (*KeyValue)(nil), // 44: tree.KeyValue - (*LogMove)(nil), // 45: tree.LogMove -} -var file_pkg_services_tree_service_proto_depIdxs = []int32{ - 21, // 0: tree.AddRequest.body:type_name -> tree.AddRequest.Body - 43, // 1: tree.AddRequest.signature:type_name -> tree.Signature - 22, // 2: tree.AddResponse.body:type_name -> tree.AddResponse.Body - 43, // 3: tree.AddResponse.signature:type_name -> tree.Signature - 23, // 4: tree.AddByPathRequest.body:type_name -> tree.AddByPathRequest.Body - 43, // 5: tree.AddByPathRequest.signature:type_name -> tree.Signature - 24, // 6: tree.AddByPathResponse.body:type_name -> tree.AddByPathResponse.Body - 43, // 7: tree.AddByPathResponse.signature:type_name -> tree.Signature - 25, // 8: tree.RemoveRequest.body:type_name -> tree.RemoveRequest.Body - 43, // 9: tree.RemoveRequest.signature:type_name -> tree.Signature - 26, // 10: tree.RemoveResponse.body:type_name -> tree.RemoveResponse.Body - 43, // 11: tree.RemoveResponse.signature:type_name -> tree.Signature - 27, // 12: tree.MoveRequest.body:type_name -> tree.MoveRequest.Body - 43, // 13: tree.MoveRequest.signature:type_name -> tree.Signature - 28, // 14: tree.MoveResponse.body:type_name -> tree.MoveResponse.Body - 43, // 15: tree.MoveResponse.signature:type_name -> tree.Signature - 29, // 16: tree.GetNodeByPathRequest.body:type_name -> tree.GetNodeByPathRequest.Body - 43, // 17: tree.GetNodeByPathRequest.signature:type_name -> tree.Signature - 31, // 18: tree.GetNodeByPathResponse.body:type_name -> tree.GetNodeByPathResponse.Body - 43, // 19: tree.GetNodeByPathResponse.signature:type_name -> tree.Signature - 32, // 20: tree.GetSubTreeRequest.body:type_name -> tree.GetSubTreeRequest.Body - 43, // 21: tree.GetSubTreeRequest.signature:type_name -> tree.Signature - 34, // 22: tree.GetSubTreeResponse.body:type_name -> tree.GetSubTreeResponse.Body - 43, // 23: tree.GetSubTreeResponse.signature:type_name -> tree.Signature - 35, // 24: tree.TreeListRequest.body:type_name -> tree.TreeListRequest.Body - 43, // 25: tree.TreeListRequest.signature:type_name -> tree.Signature - 36, // 26: tree.TreeListResponse.body:type_name -> tree.TreeListResponse.Body - 43, // 27: tree.TreeListResponse.signature:type_name -> tree.Signature - 37, // 28: tree.ApplyRequest.body:type_name -> tree.ApplyRequest.Body - 43, // 29: tree.ApplyRequest.signature:type_name -> tree.Signature - 38, // 30: tree.ApplyResponse.body:type_name -> tree.ApplyResponse.Body - 43, // 31: tree.ApplyResponse.signature:type_name -> tree.Signature - 39, // 32: tree.GetOpLogRequest.body:type_name -> tree.GetOpLogRequest.Body - 43, // 33: tree.GetOpLogRequest.signature:type_name 
-> tree.Signature - 40, // 34: tree.GetOpLogResponse.body:type_name -> tree.GetOpLogResponse.Body - 43, // 35: tree.GetOpLogResponse.signature:type_name -> tree.Signature - 41, // 36: tree.HealthcheckResponse.body:type_name -> tree.HealthcheckResponse.Body - 43, // 37: tree.HealthcheckResponse.signature:type_name -> tree.Signature - 42, // 38: tree.HealthcheckRequest.body:type_name -> tree.HealthcheckRequest.Body - 43, // 39: tree.HealthcheckRequest.signature:type_name -> tree.Signature - 44, // 40: tree.AddRequest.Body.meta:type_name -> tree.KeyValue - 44, // 41: tree.AddByPathRequest.Body.meta:type_name -> tree.KeyValue - 44, // 42: tree.MoveRequest.Body.meta:type_name -> tree.KeyValue - 44, // 43: tree.GetNodeByPathResponse.Info.meta:type_name -> tree.KeyValue - 30, // 44: tree.GetNodeByPathResponse.Body.nodes:type_name -> tree.GetNodeByPathResponse.Info - 33, // 45: tree.GetSubTreeRequest.Body.order_by:type_name -> tree.GetSubTreeRequest.Body.Order - 0, // 46: tree.GetSubTreeRequest.Body.Order.direction:type_name -> tree.GetSubTreeRequest.Body.Order.Direction - 44, // 47: tree.GetSubTreeResponse.Body.meta:type_name -> tree.KeyValue - 45, // 48: tree.ApplyRequest.Body.operation:type_name -> tree.LogMove - 45, // 49: tree.GetOpLogResponse.Body.operation:type_name -> tree.LogMove - 1, // 50: tree.TreeService.Add:input_type -> tree.AddRequest - 3, // 51: tree.TreeService.AddByPath:input_type -> tree.AddByPathRequest - 5, // 52: tree.TreeService.Remove:input_type -> tree.RemoveRequest - 7, // 53: tree.TreeService.Move:input_type -> tree.MoveRequest - 9, // 54: tree.TreeService.GetNodeByPath:input_type -> tree.GetNodeByPathRequest - 11, // 55: tree.TreeService.GetSubTree:input_type -> tree.GetSubTreeRequest - 13, // 56: tree.TreeService.TreeList:input_type -> tree.TreeListRequest - 15, // 57: tree.TreeService.Apply:input_type -> tree.ApplyRequest - 17, // 58: tree.TreeService.GetOpLog:input_type -> tree.GetOpLogRequest - 20, // 59: tree.TreeService.Healthcheck:input_type -> tree.HealthcheckRequest - 2, // 60: tree.TreeService.Add:output_type -> tree.AddResponse - 4, // 61: tree.TreeService.AddByPath:output_type -> tree.AddByPathResponse - 6, // 62: tree.TreeService.Remove:output_type -> tree.RemoveResponse - 8, // 63: tree.TreeService.Move:output_type -> tree.MoveResponse - 10, // 64: tree.TreeService.GetNodeByPath:output_type -> tree.GetNodeByPathResponse - 12, // 65: tree.TreeService.GetSubTree:output_type -> tree.GetSubTreeResponse - 14, // 66: tree.TreeService.TreeList:output_type -> tree.TreeListResponse - 16, // 67: tree.TreeService.Apply:output_type -> tree.ApplyResponse - 18, // 68: tree.TreeService.GetOpLog:output_type -> tree.GetOpLogResponse - 19, // 69: tree.TreeService.Healthcheck:output_type -> tree.HealthcheckResponse - 60, // [60:70] is the sub-list for method output_type - 50, // [50:60] is the sub-list for method input_type - 50, // [50:50] is the sub-list for extension type_name - 50, // [50:50] is the sub-list for extension extendee - 0, // [0:50] is the sub-list for field type_name -} - -func init() { file_pkg_services_tree_service_proto_init() } -func file_pkg_services_tree_service_proto_init() { - if File_pkg_services_tree_service_proto != nil { - return - } - file_pkg_services_tree_types_proto_init() - if !protoimpl.UnsafeEnabled { - file_pkg_services_tree_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddByPathRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddByPathResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MoveRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MoveResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNodeByPathRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNodeByPathResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSubTreeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSubTreeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TreeListRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TreeListResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil 
- } - } - file_pkg_services_tree_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOpLogRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOpLogResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthcheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthcheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddByPathRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddByPathResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MoveRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MoveResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNodeByPathRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNodeByPathResponse_Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNodeByPathResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSubTreeRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSubTreeRequest_Body_Order); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSubTreeResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TreeListRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TreeListResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOpLogRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOpLogResponse_Body); i { - case 0: - return &v.state - case 1: 
- return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthcheckResponse_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthcheckRequest_Body); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_services_tree_service_proto_rawDesc, - NumEnums: 1, - NumMessages: 42, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_pkg_services_tree_service_proto_goTypes, - DependencyIndexes: file_pkg_services_tree_service_proto_depIdxs, - EnumInfos: file_pkg_services_tree_service_proto_enumTypes, - MessageInfos: file_pkg_services_tree_service_proto_msgTypes, - }.Build() - File_pkg_services_tree_service_proto = out.File - file_pkg_services_tree_service_proto_rawDesc = nil - file_pkg_services_tree_service_proto_goTypes = nil - file_pkg_services_tree_service_proto_depIdxs = nil -} diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go index 1a49c5c0c..88d002621 100644 --- a/pkg/services/tree/service_frostfs.pb.go +++ b/pkg/services/tree/service_frostfs.pb.go @@ -3,11 +3,32 @@ package tree import ( - binary "encoding/binary" + json "encoding/json" + fmt "fmt" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" + easyproto "github.com/VictoriaMetrics/easyproto" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" protowire "google.golang.org/protobuf/encoding/protowire" + strconv "strconv" ) -import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" +type AddRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + ParentId uint64 `json:"parentId"` + Meta []KeyValue `json:"meta"` + BearerToken []byte `json:"bearerToken"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*AddRequest_Body)(nil) + _ json.Marshaler = (*AddRequest_Body)(nil) + _ json.Unmarshaler = (*AddRequest_Body)(nil) +) // StableSize returns the size of x in protobuf format. // @@ -20,38 +41,322 @@ func (x *AddRequest_Body) StableSize() (size int) { size += proto.StringSize(2, x.TreeId) size += proto.UInt64Size(3, x.ParentId) for i := range x.Meta { - size += proto.NestedStructureSize(4, x.Meta[i]) + size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i]) } size += proto.BytesSize(5, x.BearerToken) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *AddRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId) - for i := range x.Meta { - offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i]) - } - offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if x.ParentId != 0 { + mm.AppendUint64(3, x.ParentId) + } + for i := range x.Meta { + x.Meta[i].EmitProtobuf(mm.AppendMessage(4)) + } + if len(x.BearerToken) != 0 { + mm.AppendBytes(5, x.BearerToken) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // ParentId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ParentId") + } + x.ParentId = data + case 4: // Meta + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Meta") + } + x.Meta = append(x.Meta, KeyValue{}) + ff := &x.Meta[len(x.Meta)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 5: // BearerToken + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "BearerToken") + } + x.BearerToken = data + } + } + return nil +} +func (x *AddRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *AddRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *AddRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *AddRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *AddRequest_Body) GetParentId() uint64 { + if x != nil { + return x.ParentId + } + return 0 +} +func (x *AddRequest_Body) SetParentId(v uint64) { + x.ParentId = v +} +func (x *AddRequest_Body) GetMeta() []KeyValue { + if x != nil { + return x.Meta + } + return nil +} +func (x *AddRequest_Body) SetMeta(v []KeyValue) { + x.Meta = v +} +func (x *AddRequest_Body) GetBearerToken() []byte { + if x != nil { + return x.BearerToken + } + return nil +} +func (x *AddRequest_Body) SetBearerToken(v []byte) { + x.BearerToken = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *AddRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Meta { + if i != 0 { + out.RawByte(',') + } + x.Meta[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" + out.RawString(prefix) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *AddRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "parentId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.ParentId = f + } + case "meta": + { + var f KeyValue + var list []KeyValue + in.Delim('[') + for !in.IsDelim(']') { + f = KeyValue{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Meta = list + in.Delim(']') + } + case "bearerToken": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.BearerToken = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddRequest struct { + Body *AddRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddRequest)(nil) + _ encoding.ProtoUnmarshaler = (*AddRequest)(nil) + _ json.Marshaler = (*AddRequest)(nil) + _ json.Unmarshaler = (*AddRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -64,27 +369,6 @@ func (x *AddRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. 
-// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -100,13 +384,175 @@ func (x *AddRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *AddRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *AddRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(AddRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *AddRequest) GetBody() *AddRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *AddRequest) SetBody(v *AddRequest_Body) { + x.Body = v +} +func (x *AddRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *AddRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *AddRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *AddRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *AddRequest_Body + f = new(AddRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddResponse_Body struct { + NodeId uint64 `json:"nodeId"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*AddResponse_Body)(nil) + _ json.Marshaler = (*AddResponse_Body)(nil) + _ json.Unmarshaler = (*AddResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -118,26 +564,141 @@ func (x *AddResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.NodeId != 0 { + mm.AppendUint64(1, x.NodeId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
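+//
+// Review note (not generator output): field numbers without a matching
+// case are still consumed by fc.NextField, so unknown fields are skipped
+// silently rather than causing a decode error.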
+func (x *AddResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddResponse_Body") + } + switch fc.FieldNum { + case 1: // NodeId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "NodeId") + } + x.NodeId = data + } + } + return nil +} +func (x *AddResponse_Body) GetNodeId() uint64 { + if x != nil { + return x.NodeId + } + return 0 +} +func (x *AddResponse_Body) SetNodeId(v uint64) { + x.NodeId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *AddResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "nodeId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.NodeId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddResponse struct { + Body *AddResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddResponse)(nil) + _ encoding.ProtoUnmarshaler = (*AddResponse)(nil) + _ json.Marshaler = (*AddResponse)(nil) + _ json.Unmarshaler = (*AddResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -150,27 +711,6 @@ func (x *AddResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -186,13 +726,180 @@ func (x *AddResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *AddResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *AddResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(AddResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *AddResponse) GetBody() *AddResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *AddResponse) SetBody(v *AddResponse_Body) { + x.Body = v +} +func (x *AddResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *AddResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *AddResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *AddResponse_Body + f = new(AddResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddByPathRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + PathAttribute string `json:"pathAttribute"` + Path []string `json:"path"` + Meta []KeyValue `json:"meta"` + BearerToken []byte `json:"bearerToken"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddByPathRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*AddByPathRequest_Body)(nil) + _ json.Marshaler = (*AddByPathRequest_Body)(nil) + _ json.Unmarshaler = (*AddByPathRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -205,39 +912,360 @@ func (x *AddByPathRequest_Body) StableSize() (size int) { size += proto.StringSize(3, x.PathAttribute) size += proto.RepeatedStringSize(4, x.Path) for i := range x.Meta { - size += proto.NestedStructureSize(5, x.Meta[i]) + size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i]) } size += proto.BytesSize(6, x.BearerToken) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddByPathRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute) - offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path) - for i := range x.Meta { - offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i]) - } - offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *AddByPathRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if len(x.PathAttribute) != 0 { + mm.AppendString(3, x.PathAttribute) + } + for j := range x.Path { + mm.AppendString(4, x.Path[j]) + } + for i := range x.Meta { + x.Meta[i].EmitProtobuf(mm.AppendMessage(5)) + } + if len(x.BearerToken) != 0 { + mm.AppendBytes(6, x.BearerToken) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddByPathRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // PathAttribute + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "PathAttribute") + } + x.PathAttribute = data + case 4: // Path + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Path") + } + x.Path = append(x.Path, data) + case 5: // Meta + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Meta") + } + x.Meta = append(x.Meta, KeyValue{}) + ff := &x.Meta[len(x.Meta)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 6: // BearerToken + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "BearerToken") + } + x.BearerToken = data + } + } + return nil +} +func (x *AddByPathRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *AddByPathRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *AddByPathRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *AddByPathRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *AddByPathRequest_Body) GetPathAttribute() string { + if x != nil { + return x.PathAttribute + } + return "" +} +func (x *AddByPathRequest_Body) SetPathAttribute(v string) { + x.PathAttribute = v +} +func (x *AddByPathRequest_Body) GetPath() []string { + if x != nil { + return x.Path + } + return nil +} +func (x *AddByPathRequest_Body) SetPath(v []string) { + x.Path = v +} +func (x *AddByPathRequest_Body) GetMeta() []KeyValue { + if x != nil { + return x.Meta + } + return nil +} +func (x *AddByPathRequest_Body) SetMeta(v []KeyValue) { + x.Meta = v +} +func (x *AddByPathRequest_Body) GetBearerToken() []byte { + if x != nil { + return x.BearerToken + } + return nil +} +func (x *AddByPathRequest_Body) SetBearerToken(v []byte) { + x.BearerToken = v +} + +// MarshalJSON implements the json.Marshaler interface. 
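+//
+// Review note (not generator output): []byte fields (containerId,
+// bearerToken) are emitted as base64 strings, and a nil slice is written
+// as "" rather than null, so the JSON shape is stable across inputs.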
+func (x *AddByPathRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"pathAttribute\":" + out.RawString(prefix) + out.String(x.PathAttribute) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"path\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Path { + if i != 0 { + out.RawByte(',') + } + out.String(x.Path[i]) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Meta { + if i != 0 { + out.RawByte(',') + } + x.Meta[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" + out.RawString(prefix) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *AddByPathRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "pathAttribute": + { + var f string + f = in.String() + x.PathAttribute = f + } + case "path": + { + var f string + var list []string + in.Delim('[') + for !in.IsDelim(']') { + f = in.String() + list = append(list, f) + in.WantComma() + } + x.Path = list + in.Delim(']') + } + case "meta": + { + var f KeyValue + var list []KeyValue + in.Delim('[') + for !in.IsDelim(']') { + f = KeyValue{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Meta = list + in.Delim(']') + } + case "bearerToken": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.BearerToken = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddByPathRequest struct { + Body *AddByPathRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddByPathRequest)(nil) + _ encoding.ProtoUnmarshaler = (*AddByPathRequest)(nil) + _ json.Marshaler = (*AddByPathRequest)(nil) + _ json.Unmarshaler = (*AddByPathRequest)(nil) +) + // StableSize returns the size of x in 
protobuf format. // // Structures with the same field values have the same binary size. @@ -250,27 +1278,6 @@ func (x *AddByPathRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddByPathRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -286,13 +1293,176 @@ func (x *AddByPathRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *AddByPathRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *AddByPathRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddByPathRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddByPathRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddByPathRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(AddByPathRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *AddByPathRequest) SetBody(v *AddByPathRequest_Body) { + x.Body = v +} +func (x *AddByPathRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *AddByPathRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *AddByPathRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *AddByPathRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *AddByPathRequest_Body + f = new(AddByPathRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddByPathResponse_Body struct { + Nodes []uint64 `json:"nodes"` + ParentId uint64 `json:"parentId"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddByPathResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*AddByPathResponse_Body)(nil) + _ json.Marshaler = (*AddByPathResponse_Body)(nil) + _ json.Unmarshaler = (*AddByPathResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -307,27 +1477,199 @@ func (x *AddByPathResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddByPathResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedUInt64Marshal(1, buf[offset:], x.Nodes) - offset += proto.UInt64Marshal(2, buf[offset:], x.ParentId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *AddByPathResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Nodes) != 0 { + mm.AppendUint64s(1, x.Nodes) + } + if x.ParentId != 0 { + mm.AppendUint64(2, x.ParentId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
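+//
+// Review note (not generator output): the repeated uint64 field (nodes)
+// is decoded with fc.UnpackUint64s, matching the packed wire encoding
+// that EmitProtobuf produces via mm.AppendUint64s.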
+func (x *AddByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddByPathResponse_Body") + } + switch fc.FieldNum { + case 1: // Nodes + data, ok := fc.UnpackUint64s(nil) + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Nodes") + } + x.Nodes = data + case 2: // ParentId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ParentId") + } + x.ParentId = data + } + } + return nil +} +func (x *AddByPathResponse_Body) GetNodes() []uint64 { + if x != nil { + return x.Nodes + } + return nil +} +func (x *AddByPathResponse_Body) SetNodes(v []uint64) { + x.Nodes = v +} +func (x *AddByPathResponse_Body) GetParentId() uint64 { + if x != nil { + return x.ParentId + } + return 0 +} +func (x *AddByPathResponse_Body) SetParentId(v uint64) { + x.ParentId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddByPathResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodes\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Nodes { + if i != 0 { + out.RawByte(',') + } + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Nodes[i], 10) + out.RawByte('"') + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *AddByPathResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "nodes": + { + var f uint64 + var list []uint64 + in.Delim('[') + for !in.IsDelim(']') { + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + list = append(list, f) + in.WantComma() + } + x.Nodes = list + in.Delim(']') + } + case "parentId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.ParentId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type AddByPathResponse struct { + Body *AddByPathResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*AddByPathResponse)(nil) + _ encoding.ProtoUnmarshaler = (*AddByPathResponse)(nil) + _ json.Marshaler = (*AddByPathResponse)(nil) + _ json.Unmarshaler = (*AddByPathResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -340,27 +1682,6 @@ func (x *AddByPathResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *AddByPathResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -376,13 +1697,178 @@ func (x *AddByPathResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *AddByPathResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *AddByPathResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *AddByPathResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *AddByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *AddByPathResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "AddByPathResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(AddByPathResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *AddByPathResponse) SetBody(v *AddByPathResponse_Body) { + x.Body = v +} +func (x *AddByPathResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *AddByPathResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *AddByPathResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *AddByPathResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *AddByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *AddByPathResponse_Body + f = new(AddByPathResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + NodeId uint64 `json:"nodeId"` + BearerToken []byte `json:"bearerToken"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveRequest_Body)(nil) + _ json.Marshaler = (*RemoveRequest_Body)(nil) + _ json.Unmarshaler = (*RemoveRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -397,29 +1883,263 @@ func (x *RemoveRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.UInt64Marshal(3, buf[offset:], x.NodeId) - offset += proto.BytesMarshal(4, buf[offset:], x.BearerToken) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if x.NodeId != 0 { + mm.AppendUint64(3, x.NodeId) + } + if len(x.BearerToken) != 0 { + mm.AppendBytes(4, x.BearerToken) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *RemoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // NodeId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "NodeId") + } + x.NodeId = data + case 4: // BearerToken + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "BearerToken") + } + x.BearerToken = data + } + } + return nil +} +func (x *RemoveRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *RemoveRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *RemoveRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *RemoveRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *RemoveRequest_Body) GetNodeId() uint64 { + if x != nil { + return x.NodeId + } + return 0 +} +func (x *RemoveRequest_Body) SetNodeId(v uint64) { + x.NodeId = v +} +func (x *RemoveRequest_Body) GetBearerToken() []byte { + if x != nil { + return x.BearerToken + } + return nil +} +func (x *RemoveRequest_Body) SetBearerToken(v []byte) { + x.BearerToken = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" + out.RawString(prefix) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "nodeId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.NodeId = f + } + case "bearerToken": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.BearerToken = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveRequest struct { + Body *RemoveRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveRequest)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveRequest)(nil) + _ json.Marshaler = (*RemoveRequest)(nil) + _ json.Unmarshaler = (*RemoveRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -432,27 +2152,6 @@ func (x *RemoveRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -468,13 +2167,174 @@ func (x *RemoveRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *RemoveRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *RemoveRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveRequest) GetBody() *RemoveRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveRequest) SetBody(v *RemoveRequest_Body) { + x.Body = v +} +func (x *RemoveRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveRequest_Body + f = new(RemoveRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveResponse_Body)(nil) + _ json.Marshaler = (*RemoveResponse_Body)(nil) + _ json.Unmarshaler = (*RemoveResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -485,18 +2345,93 @@ func (x *RemoveResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. 
-// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *RemoveResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type RemoveResponse struct { + Body *RemoveResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*RemoveResponse)(nil) + _ encoding.ProtoUnmarshaler = (*RemoveResponse)(nil) + _ json.Marshaler = (*RemoveResponse)(nil) + _ json.Unmarshaler = (*RemoveResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -509,27 +2444,6 @@ func (x *RemoveResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *RemoveResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -545,13 +2459,180 @@ func (x *RemoveResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. 
func (x *RemoveResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *RemoveResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *RemoveResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *RemoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *RemoveResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "RemoveResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(RemoveResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *RemoveResponse) GetBody() *RemoveResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *RemoveResponse) SetBody(v *RemoveResponse_Body) { + x.Body = v +} +func (x *RemoveResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *RemoveResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *RemoveResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *RemoveResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *RemoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *RemoveResponse_Body + f = new(RemoveResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type MoveRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + ParentId uint64 `json:"parentId"` + NodeId uint64 `json:"nodeId"` + Meta []KeyValue `json:"meta"` + BearerToken []byte `json:"bearerToken"` +} + +var ( + _ encoding.ProtoMarshaler = (*MoveRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*MoveRequest_Body)(nil) + _ json.Marshaler = (*MoveRequest_Body)(nil) + _ json.Unmarshaler = (*MoveRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -564,39 +2645,366 @@ func (x *MoveRequest_Body) StableSize() (size int) { size += proto.UInt64Size(3, x.ParentId) size += proto.UInt64Size(4, x.NodeId) for i := range x.Meta { - size += proto.NestedStructureSize(5, x.Meta[i]) + size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i]) } size += proto.BytesSize(6, x.BearerToken) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *MoveRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId) - offset += proto.UInt64Marshal(4, buf[offset:], x.NodeId) - for i := range x.Meta { - offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i]) - } - offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *MoveRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if x.ParentId != 0 { + mm.AppendUint64(3, x.ParentId) + } + if x.NodeId != 0 { + mm.AppendUint64(4, x.NodeId) + } + for i := range x.Meta { + x.Meta[i].EmitProtobuf(mm.AppendMessage(5)) + } + if len(x.BearerToken) != 0 { + mm.AppendBytes(6, x.BearerToken) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *MoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "MoveRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // ParentId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ParentId") + } + x.ParentId = data + case 4: // NodeId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "NodeId") + } + x.NodeId = data + case 5: // Meta + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Meta") + } + x.Meta = append(x.Meta, KeyValue{}) + ff := &x.Meta[len(x.Meta)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 6: // BearerToken + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "BearerToken") + } + x.BearerToken = data + } + } + return nil +} +func (x *MoveRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *MoveRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *MoveRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *MoveRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *MoveRequest_Body) GetParentId() uint64 { + if x != nil { + return x.ParentId + } + return 0 +} +func (x *MoveRequest_Body) SetParentId(v uint64) { + x.ParentId = v +} +func (x *MoveRequest_Body) GetNodeId() uint64 { + if x != nil { + return x.NodeId + } + return 0 +} +func (x *MoveRequest_Body) SetNodeId(v uint64) { + x.NodeId = v +} +func (x *MoveRequest_Body) GetMeta() []KeyValue { + if x != nil { + return x.Meta + } + return nil +} +func (x *MoveRequest_Body) SetMeta(v []KeyValue) { + x.Meta = v +} +func (x *MoveRequest_Body) GetBearerToken() []byte { + if x != nil { + return x.BearerToken + } + return nil +} +func (x *MoveRequest_Body) SetBearerToken(v []byte) { + x.BearerToken = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *MoveRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Meta { + if i != 0 { + out.RawByte(',') + } + x.Meta[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" + out.RawString(prefix) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *MoveRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "parentId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.ParentId = f + } + case "nodeId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.NodeId = f + } + case "meta": + { + var f KeyValue + var list []KeyValue + in.Delim('[') + for !in.IsDelim(']') { + f = KeyValue{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Meta = list + in.Delim(']') + } + case "bearerToken": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.BearerToken = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type MoveRequest struct { + Body *MoveRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*MoveRequest)(nil) + _ encoding.ProtoUnmarshaler = 
(*MoveRequest)(nil) + _ json.Marshaler = (*MoveRequest)(nil) + _ json.Unmarshaler = (*MoveRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -609,27 +3017,6 @@ func (x *MoveRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *MoveRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -645,13 +3032,174 @@ func (x *MoveRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *MoveRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *MoveRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *MoveRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *MoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *MoveRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "MoveRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(MoveRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *MoveRequest) GetBody() *MoveRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *MoveRequest) SetBody(v *MoveRequest_Body) { + x.Body = v +} +func (x *MoveRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *MoveRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *MoveRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *MoveRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *MoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *MoveRequest_Body + f = new(MoveRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type MoveResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*MoveResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*MoveResponse_Body)(nil) + _ json.Marshaler = (*MoveResponse_Body)(nil) + _ json.Unmarshaler = (*MoveResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -662,18 +3210,93 @@ func (x *MoveResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *MoveResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *MoveResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *MoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *MoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "MoveResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *MoveResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *MoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *MoveResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *MoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type MoveResponse struct { + Body *MoveResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*MoveResponse)(nil) + _ encoding.ProtoUnmarshaler = (*MoveResponse)(nil) + _ json.Marshaler = (*MoveResponse)(nil) + _ json.Unmarshaler = (*MoveResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -686,27 +3309,6 @@ func (x *MoveResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *MoveResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -722,13 +3324,182 @@ func (x *MoveResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *MoveResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *MoveResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *MoveResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *MoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *MoveResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "MoveResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(MoveResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *MoveResponse) GetBody() *MoveResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *MoveResponse) SetBody(v *MoveResponse_Body) { + x.Body = v +} +func (x *MoveResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *MoveResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *MoveResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *MoveResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *MoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *MoveResponse_Body + f = new(MoveResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNodeByPathRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + PathAttribute string `json:"pathAttribute"` + Path []string `json:"path"` + Attributes []string `json:"attributes"` + LatestOnly bool `json:"latestOnly"` + AllAttributes bool `json:"allAttributes"` + BearerToken []byte `json:"bearerToken"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNodeByPathRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest_Body)(nil) + _ json.Marshaler = (*GetNodeByPathRequest_Body)(nil) + _ json.Unmarshaler = (*GetNodeByPathRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
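// A minimal usage sketch (illustrative only, not part of the patch) of the
// API this change introduces: the hand-rolled StableMarshal writers are
// removed in favor of easyproto-based MarshalProtobuf/UnmarshalProtobuf and
// easyjson-based MarshalJSON/UnmarshalJSON. The import path and alias below
// are assumptions for illustration; everything else uses only methods and
// fields visible in this diff.
package main

import (
	"fmt"

	tree "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
)

func main() {
	req := new(tree.MoveRequest)
	req.SetBody(&tree.MoveRequest_Body{
		TreeId:   "system",
		ParentId: 1,
		NodeId:   2,
	})

	// Protobuf round-trip: MarshalProtobuf appends to dst, so passing nil
	// lets the pooled marshaler allocate a buffer of the right size.
	buf := req.MarshalProtobuf(nil)

	var out tree.MoveRequest
	if err := out.UnmarshalProtobuf(buf); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}

	// JSON round-trip: note that uint64 fields (parentId, nodeId) are
	// emitted as quoted decimal strings, which UnmarshalEasyJSON reads
	// back through JsonNumber.
	js, err := out.MarshalJSON()
	if err != nil {
		fmt.Println("marshal json:", err)
		return
	}
	fmt.Println(string(js))
}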
@@ -747,33 +3518,417 @@ func (x *GetNodeByPathRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetNodeByPathRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute) - offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path) - offset += proto.RepeatedStringMarshal(5, buf[offset:], x.Attributes) - offset += proto.BoolMarshal(6, buf[offset:], x.LatestOnly) - offset += proto.BoolMarshal(7, buf[offset:], x.AllAttributes) - offset += proto.BytesMarshal(8, buf[offset:], x.BearerToken) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetNodeByPathRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNodeByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if len(x.PathAttribute) != 0 { + mm.AppendString(3, x.PathAttribute) + } + for j := range x.Path { + mm.AppendString(4, x.Path[j]) + } + for j := range x.Attributes { + mm.AppendString(5, x.Attributes[j]) + } + if x.LatestOnly { + mm.AppendBool(6, x.LatestOnly) + } + if x.AllAttributes { + mm.AppendBool(7, x.AllAttributes) + } + if len(x.BearerToken) != 0 { + mm.AppendBytes(8, x.BearerToken) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetNodeByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // PathAttribute + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "PathAttribute") + } + x.PathAttribute = data + case 4: // Path + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Path") + } + x.Path = append(x.Path, data) + case 5: // Attributes + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Attributes") + } + x.Attributes = append(x.Attributes, data) + case 6: // LatestOnly + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "LatestOnly") + } + x.LatestOnly = data + case 7: // AllAttributes + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "AllAttributes") + } + x.AllAttributes = data + case 8: // BearerToken + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "BearerToken") + } + x.BearerToken = data + } + } + return nil +} +func (x *GetNodeByPathRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *GetNodeByPathRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *GetNodeByPathRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *GetNodeByPathRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *GetNodeByPathRequest_Body) GetPathAttribute() string { + if x != nil { + return x.PathAttribute + } + return "" +} +func (x *GetNodeByPathRequest_Body) SetPathAttribute(v string) { + x.PathAttribute = v +} +func (x *GetNodeByPathRequest_Body) GetPath() []string { + if x != nil { + return x.Path + } + return nil +} +func (x *GetNodeByPathRequest_Body) SetPath(v []string) { + x.Path = v +} +func (x *GetNodeByPathRequest_Body) GetAttributes() []string { + if x != nil { + return x.Attributes + } + return nil +} +func (x *GetNodeByPathRequest_Body) SetAttributes(v []string) { + x.Attributes = v +} +func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool { + if x != nil { + return x.LatestOnly + } + return false +} +func (x *GetNodeByPathRequest_Body) SetLatestOnly(v bool) { + x.LatestOnly = v +} +func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool { + if x != nil { + return x.AllAttributes + } + return false +} +func (x *GetNodeByPathRequest_Body) SetAllAttributes(v bool) { + x.AllAttributes = v +} +func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte { + if x != nil { + return x.BearerToken + } + return nil +} +func (x *GetNodeByPathRequest_Body) SetBearerToken(v []byte) { + x.BearerToken = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *GetNodeByPathRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"pathAttribute\":" + out.RawString(prefix) + out.String(x.PathAttribute) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"path\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Path { + if i != 0 { + out.RawByte(',') + } + out.String(x.Path[i]) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"attributes\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Attributes { + if i != 0 { + out.RawByte(',') + } + out.String(x.Attributes[i]) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"latestOnly\":" + out.RawString(prefix) + out.Bool(x.LatestOnly) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"allAttributes\":" + out.RawString(prefix) + out.Bool(x.AllAttributes) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" + out.RawString(prefix) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetNodeByPathRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "pathAttribute": + { + var f string + f = in.String() + x.PathAttribute = f + } + case "path": + { + var f string + var list []string + in.Delim('[') + for !in.IsDelim(']') { + f = in.String() + list = append(list, f) + in.WantComma() + } + x.Path = list + in.Delim(']') + } + case "attributes": + { + var f string + var list []string + in.Delim('[') + for !in.IsDelim(']') { + f = in.String() + list = append(list, f) + in.WantComma() + } + x.Attributes = list + in.Delim(']') + } + case "latestOnly": + { + var f bool + f = in.Bool() + x.LatestOnly = f + } + case "allAttributes": + { + var f bool + f = in.Bool() + x.AllAttributes = f + } + case "bearerToken": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.BearerToken = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNodeByPathRequest struct { + Body *GetNodeByPathRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNodeByPathRequest)(nil) + _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest)(nil) + _ json.Marshaler = (*GetNodeByPathRequest)(nil) + _ json.Unmarshaler = (*GetNodeByPathRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -786,27 +3941,6 @@ func (x *GetNodeByPathRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetNodeByPathRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -822,13 +3956,178 @@ func (x *GetNodeByPathRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetNodeByPathRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetNodeByPathRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *GetNodeByPathRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNodeByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetNodeByPathRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetNodeByPathRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetNodeByPathRequest) SetBody(v *GetNodeByPathRequest_Body) { + x.Body = v +} +func (x *GetNodeByPathRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetNodeByPathRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetNodeByPathRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetNodeByPathRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNodeByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetNodeByPathRequest_Body + f = new(GetNodeByPathRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNodeByPathResponse_Info struct { + NodeId uint64 `json:"nodeId"` + Timestamp uint64 `json:"timestamp"` + Meta []KeyValue `json:"meta"` + ParentId uint64 `json:"parentId"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Info)(nil) + _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Info)(nil) + _ json.Marshaler = (*GetNodeByPathResponse_Info)(nil) + _ json.Unmarshaler = (*GetNodeByPathResponse_Info)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -839,37 +4138,287 @@ func (x *GetNodeByPathResponse_Info) StableSize() (size int) { size += proto.UInt64Size(1, x.NodeId) size += proto.UInt64Size(2, x.Timestamp) for i := range x.Meta { - size += proto.NestedStructureSize(3, x.Meta[i]) + size += proto.NestedStructureSizeUnchecked(3, &x.Meta[i]) } size += proto.UInt64Size(4, x.ParentId) return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetNodeByPathResponse_Info) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId) - offset += proto.UInt64Marshal(2, buf[offset:], x.Timestamp) - for i := range x.Meta { - offset += proto.NestedStructureMarshal(3, buf[offset:], x.Meta[i]) - } - offset += proto.UInt64Marshal(4, buf[offset:], x.ParentId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetNodeByPathResponse_Info) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.NodeId != 0 { + mm.AppendUint64(1, x.NodeId) + } + if x.Timestamp != 0 { + mm.AppendUint64(2, x.Timestamp) + } + for i := range x.Meta { + x.Meta[i].EmitProtobuf(mm.AppendMessage(3)) + } + if x.ParentId != 0 { + mm.AppendUint64(4, x.ParentId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetNodeByPathResponse_Info) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Info") + } + switch fc.FieldNum { + case 1: // NodeId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "NodeId") + } + x.NodeId = data + case 2: // Timestamp + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Timestamp") + } + x.Timestamp = data + case 3: // Meta + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Meta") + } + x.Meta = append(x.Meta, KeyValue{}) + ff := &x.Meta[len(x.Meta)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 4: // ParentId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ParentId") + } + x.ParentId = data + } + } + return nil +} +func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 { + if x != nil { + return x.NodeId + } + return 0 +} +func (x *GetNodeByPathResponse_Info) SetNodeId(v uint64) { + x.NodeId = v +} +func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} +func (x *GetNodeByPathResponse_Info) SetTimestamp(v uint64) { + x.Timestamp = v +} +func (x *GetNodeByPathResponse_Info) GetMeta() []KeyValue { + if x != nil { + return x.Meta + } + return nil +} +func (x *GetNodeByPathResponse_Info) SetMeta(v []KeyValue) { + x.Meta = v +} +func (x *GetNodeByPathResponse_Info) GetParentId() uint64 { + if x != nil { + return x.ParentId + } + return 0 +} +func (x *GetNodeByPathResponse_Info) SetParentId(v uint64) { + x.ParentId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetNodeByPathResponse_Info) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"timestamp\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Meta { + if i != 0 { + out.RawByte(',') + } + x.Meta[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetNodeByPathResponse_Info) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "nodeId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.NodeId = f + } + case "timestamp": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.Timestamp = f + } + case "meta": + { + var f KeyValue + var list []KeyValue + in.Delim('[') + for !in.IsDelim(']') { + f = KeyValue{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Meta = list + in.Delim(']') + } + case "parentId": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.ParentId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNodeByPathResponse_Body struct { + Nodes []GetNodeByPathResponse_Info `json:"nodes"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Body)(nil) + _ json.Marshaler = (*GetNodeByPathResponse_Body)(nil) + _ json.Unmarshaler = (*GetNodeByPathResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -878,33 +4427,155 @@ func (x *GetNodeByPathResponse_Body) StableSize() (size int) { return 0 } for i := range x.Nodes { - size += proto.NestedStructureSize(1, x.Nodes[i]) + size += proto.NestedStructureSizeUnchecked(1, &x.Nodes[i]) } return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetNodeByPathResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - for i := range x.Nodes { - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Nodes[i]) - } - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetNodeByPathResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for i := range x.Nodes { + x.Nodes[i].EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Body") + } + switch fc.FieldNum { + case 1: // Nodes + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Nodes") + } + x.Nodes = append(x.Nodes, GetNodeByPathResponse_Info{}) + ff := &x.Nodes[len(x.Nodes)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetNodeByPathResponse_Body) GetNodes() []GetNodeByPathResponse_Info { + if x != nil { + return x.Nodes + } + return nil +} +func (x *GetNodeByPathResponse_Body) SetNodes(v []GetNodeByPathResponse_Info) { + x.Nodes = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetNodeByPathResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodes\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Nodes { + if i != 0 { + out.RawByte(',') + } + x.Nodes[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetNodeByPathResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNodeByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "nodes": + { + var f GetNodeByPathResponse_Info + var list []GetNodeByPathResponse_Info + in.Delim('[') + for !in.IsDelim(']') { + f = GetNodeByPathResponse_Info{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Nodes = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetNodeByPathResponse struct { + Body *GetNodeByPathResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetNodeByPathResponse)(nil) + _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse)(nil) + _ json.Marshaler = (*GetNodeByPathResponse)(nil) + _ json.Unmarshaler = (*GetNodeByPathResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -917,27 +4588,6 @@ func (x *GetNodeByPathResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. 
-func (x *GetNodeByPathResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -953,13 +4603,207 @@ func (x *GetNodeByPathResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetNodeByPathResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetNodeByPathResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetNodeByPathResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetNodeByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetNodeByPathResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetNodeByPathResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetNodeByPathResponse) SetBody(v *GetNodeByPathResponse_Body) { + x.Body = v +} +func (x *GetNodeByPathResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetNodeByPathResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetNodeByPathResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetNodeByPathResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetNodeByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetNodeByPathResponse_Body + f = new(GetNodeByPathResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetSubTreeRequest_Body_Order_Direction int32 + +const ( + GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0 + GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1 +) + +var ( + GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{ + 0: "None", + 1: "Asc", + } + GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{ + "None": 0, + "Asc": 1, + } +) + +func (x GetSubTreeRequest_Body_Order_Direction) String() string { + if v, ok := GetSubTreeRequest_Body_Order_Direction_name[int32(x)]; ok { + return v + } + return strconv.FormatInt(int64(x), 10) +} +func (x *GetSubTreeRequest_Body_Order_Direction) FromString(s string) bool { + if v, ok := GetSubTreeRequest_Body_Order_Direction_value[s]; ok { + *x = GetSubTreeRequest_Body_Order_Direction(v) + return true + } + return false +} + +type GetSubTreeRequest_Body_Order struct { + Direction GetSubTreeRequest_Body_Order_Direction `json:"direction"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body_Order)(nil) + _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body_Order)(nil) + _ json.Marshaler = (*GetSubTreeRequest_Body_Order)(nil) + _ json.Unmarshaler = (*GetSubTreeRequest_Body_Order)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -971,26 +4815,156 @@ func (x *GetSubTreeRequest_Body_Order) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetSubTreeRequest_Body_Order) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.EnumMarshal(1, buf[offset:], int32(x.Direction)) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetSubTreeRequest_Body_Order) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetSubTreeRequest_Body_Order) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if int32(x.Direction) != 0 { + mm.AppendInt32(1, int32(x.Direction)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
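// Hypothetical sketch, not generated code: a small demo of the Direction
// enum helpers defined above. String falls back to the decimal value for
// unknown numbers, and FromString reports whether a name was recognized.
func exampleDirectionNames() {
	fmt.Println(GetSubTreeRequest_Body_Order_Asc.String())          // "Asc"
	fmt.Println(GetSubTreeRequest_Body_Order_Direction(7).String()) // "7"

	var d GetSubTreeRequest_Body_Order_Direction
	fmt.Println(d.FromString("None")) // true, d is now 0
	fmt.Println(d.FromString("Desc")) // false, "Desc" is not in the value map
}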
+func (x *GetSubTreeRequest_Body_Order) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body_Order") + } + switch fc.FieldNum { + case 1: // Direction + data, ok := fc.Int32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Direction") + } + x.Direction = GetSubTreeRequest_Body_Order_Direction(data) + } + } + return nil +} +func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction { + if x != nil { + return x.Direction + } + return 0 +} +func (x *GetSubTreeRequest_Body_Order) SetDirection(v GetSubTreeRequest_Body_Order_Direction) { + x.Direction = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetSubTreeRequest_Body_Order) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"direction\":" + out.RawString(prefix) + v := int32(x.Direction) + if vv, ok := GetSubTreeRequest_Body_Order_Direction_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetSubTreeRequest_Body_Order) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetSubTreeRequest_Body_Order) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "direction": + { + var f GetSubTreeRequest_Body_Order_Direction + var parsedValue GetSubTreeRequest_Body_Order_Direction + switch v := in.Interface().(type) { + case string: + if vv, ok := GetSubTreeRequest_Body_Order_Direction_value[v]; ok { + parsedValue = GetSubTreeRequest_Body_Order_Direction(vv) + break + } + vv, err := strconv.ParseInt(v, 10, 32) + if err != nil { + in.AddError(err) + return + } + parsedValue = GetSubTreeRequest_Body_Order_Direction(vv) + case float64: + parsedValue = GetSubTreeRequest_Body_Order_Direction(v) + } + f = parsedValue + x.Direction = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetSubTreeRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + RootId []uint64 `json:"rootId"` + Depth uint32 `json:"depth"` + BearerToken []byte `json:"bearerToken"` + OrderBy *GetSubTreeRequest_Body_Order `json:"orderBy"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body)(nil) + _ json.Marshaler = (*GetSubTreeRequest_Body)(nil) + _ json.Unmarshaler = (*GetSubTreeRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1009,36 +4983,356 @@ func (x *GetSubTreeRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. 
-// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetSubTreeRequest_Body) StableMarshal(buf []byte) []byte { +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetSubTreeRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *GetSubTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { if x == nil { - return []byte{} + return } - if buf == nil { - buf = make([]byte, x.StableSize()) + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - for i := range x.RootId { - { - prefix := protowire.EncodeTag(protowire.Number(3), protowire.VarintType) - offset += binary.PutUvarint(buf[offset:], uint64(prefix)) - offset += binary.PutUvarint(buf[offset:], x.RootId[i]) + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + for j := range x.RootId { + mm.AppendUint64(3, x.RootId[j]) + } + if x.Depth != 0 { + mm.AppendUint32(4, x.Depth) + } + if len(x.BearerToken) != 0 { + mm.AppendBytes(5, x.BearerToken) + } + if x.OrderBy != nil { + x.OrderBy.EmitProtobuf(mm.AppendMessage(6)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetSubTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // RootId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "RootId") + } + x.RootId = append(x.RootId, data) + case 4: // Depth + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Depth") + } + x.Depth = data + case 5: // BearerToken + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "BearerToken") + } + x.BearerToken = data + case 6: // OrderBy + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "OrderBy") + } + x.OrderBy = new(GetSubTreeRequest_Body_Order) + if err := x.OrderBy.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } } } - offset += proto.UInt32Marshal(4, buf[offset:], x.Depth) - offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken) - offset += proto.NestedStructureMarshal(6, buf[offset:], x.OrderBy) - return buf + return nil } +func (x *GetSubTreeRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *GetSubTreeRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *GetSubTreeRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *GetSubTreeRequest_Body) SetTreeId(v 
string) { + x.TreeId = v +} +func (x *GetSubTreeRequest_Body) GetRootId() []uint64 { + if x != nil { + return x.RootId + } + return nil +} +func (x *GetSubTreeRequest_Body) SetRootId(v []uint64) { + x.RootId = v +} +func (x *GetSubTreeRequest_Body) GetDepth() uint32 { + if x != nil { + return x.Depth + } + return 0 +} +func (x *GetSubTreeRequest_Body) SetDepth(v uint32) { + x.Depth = v +} +func (x *GetSubTreeRequest_Body) GetBearerToken() []byte { + if x != nil { + return x.BearerToken + } + return nil +} +func (x *GetSubTreeRequest_Body) SetBearerToken(v []byte) { + x.BearerToken = v +} +func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order { + if x != nil { + return x.OrderBy + } + return nil +} +func (x *GetSubTreeRequest_Body) SetOrderBy(v *GetSubTreeRequest_Body_Order) { + x.OrderBy = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetSubTreeRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"rootId\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.RootId { + if i != 0 { + out.RawByte(',') + } + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.RootId[i], 10) + out.RawByte('"') + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"depth\":" + out.RawString(prefix) + out.Uint32(x.Depth) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" + out.RawString(prefix) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"orderBy\":" + out.RawString(prefix) + x.OrderBy.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
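// Hypothetical sketch, not generated code: it shows the JSON shape produced
// by the marshaler above, which follows the protobuf JSON mapping. Byte
// fields ("containerId", "bearerToken") come out base64-encoded, 64-bit
// integers ("rootId") come out as quoted decimal strings, and the 32-bit
// "depth" stays a plain number.
func exampleGetSubTreeRequestBodyJSON() ([]byte, error) {
	body := new(GetSubTreeRequest_Body)
	body.SetTreeId("system")
	body.SetRootId([]uint64{0})
	body.SetDepth(2)
	// Produces, with the fixed field order of the generated marshaler:
	// {"containerId":"","treeId":"system","rootId":["0"],"depth":2,
	//  "bearerToken":"","orderBy":null}
	return body.MarshalJSON()
}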
+func (x *GetSubTreeRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "rootId": + { + var f uint64 + var list []uint64 + in.Delim('[') + for !in.IsDelim(']') { + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + list = append(list, f) + in.WantComma() + } + x.RootId = list + in.Delim(']') + } + case "depth": + { + var f uint32 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv + x.Depth = f + } + case "bearerToken": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.BearerToken = f + } + case "orderBy": + { + var f *GetSubTreeRequest_Body_Order + f = new(GetSubTreeRequest_Body_Order) + f.UnmarshalEasyJSON(in) + x.OrderBy = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetSubTreeRequest struct { + Body *GetSubTreeRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetSubTreeRequest)(nil) + _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest)(nil) + _ json.Marshaler = (*GetSubTreeRequest)(nil) + _ json.Unmarshaler = (*GetSubTreeRequest)(nil) +) // StableSize returns the size of x in protobuf format. // @@ -1052,27 +5346,6 @@ func (x *GetSubTreeRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetSubTreeRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1088,13 +5361,178 @@ func (x *GetSubTreeRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetSubTreeRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetSubTreeRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *GetSubTreeRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetSubTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetSubTreeRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetSubTreeRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetSubTreeRequest) SetBody(v *GetSubTreeRequest_Body) { + x.Body = v +} +func (x *GetSubTreeRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetSubTreeRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetSubTreeRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
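// Hypothetical sketch, not generated code: MarshalProtobuf appends to dst
// through a pooled easyproto marshaler, so callers on a hot path can keep
// reusing one buffer across requests instead of allocating a fresh one.
func exampleMarshalGetSubTreeRequest(buf []byte) []byte {
	body := new(GetSubTreeRequest_Body)
	body.SetTreeId("version")
	body.SetDepth(1)

	req := new(GetSubTreeRequest)
	req.SetBody(body)

	// Truncate and reuse the caller's buffer; passing nil also works.
	return req.MarshalProtobuf(buf[:0])
}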
+func (x *GetSubTreeRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetSubTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetSubTreeRequest_Body + f = new(GetSubTreeRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetSubTreeResponse_Body struct { + NodeId []uint64 `json:"nodeId"` + ParentId []uint64 `json:"parentId"` + Timestamp []uint64 `json:"timestamp"` + Meta []KeyValue `json:"meta"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetSubTreeResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse_Body)(nil) + _ json.Marshaler = (*GetSubTreeResponse_Body)(nil) + _ json.Unmarshaler = (*GetSubTreeResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1112,54 +5550,329 @@ func (x *GetSubTreeResponse_Body) StableSize() (size int) { size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.Timestamp[i])) } for i := range x.Meta { - size += proto.NestedStructureSize(4, x.Meta[i]) + size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i]) } return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetSubTreeResponse_Body) StableMarshal(buf []byte) []byte { +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *GetSubTreeResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { if x == nil { - return []byte{} + return } - if buf == nil { - buf = make([]byte, x.StableSize()) + for j := range x.NodeId { + mm.AppendUint64(1, x.NodeId[j]) } - var offset int - for i := range x.NodeId { - { - prefix := protowire.EncodeTag(protowire.Number(1), protowire.VarintType) - offset += binary.PutUvarint(buf[offset:], uint64(prefix)) - offset += binary.PutUvarint(buf[offset:], x.NodeId[i]) - } + for j := range x.ParentId { + mm.AppendUint64(2, x.ParentId[j]) } - for i := range x.ParentId { - { - prefix := protowire.EncodeTag(protowire.Number(2), protowire.VarintType) - offset += binary.PutUvarint(buf[offset:], uint64(prefix)) - offset += binary.PutUvarint(buf[offset:], x.ParentId[i]) - } - } - for i := range x.Timestamp { - { - prefix := protowire.EncodeTag(protowire.Number(3), protowire.VarintType) - offset += binary.PutUvarint(buf[offset:], uint64(prefix)) - offset += binary.PutUvarint(buf[offset:], x.Timestamp[i]) - } + for j := range x.Timestamp { + mm.AppendUint64(3, x.Timestamp[j]) } for i := range x.Meta { - offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i]) + x.Meta[i].EmitProtobuf(mm.AppendMessage(4)) } - return buf } +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetSubTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse_Body") + } + switch fc.FieldNum { + case 1: // NodeId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "NodeId") + } + x.NodeId = append(x.NodeId, data) + case 2: // ParentId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ParentId") + } + x.ParentId = append(x.ParentId, data) + case 3: // Timestamp + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Timestamp") + } + x.Timestamp = append(x.Timestamp, data) + case 4: // Meta + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Meta") + } + x.Meta = append(x.Meta, KeyValue{}) + ff := &x.Meta[len(x.Meta)-1] + if err := ff.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetSubTreeResponse_Body) GetNodeId() []uint64 { + if x != nil { + return x.NodeId + } + return nil +} +func (x *GetSubTreeResponse_Body) SetNodeId(v []uint64) { + x.NodeId = v +} +func (x *GetSubTreeResponse_Body) GetParentId() []uint64 { + if x != nil { + return x.ParentId + } + return nil +} +func (x *GetSubTreeResponse_Body) SetParentId(v []uint64) { + x.ParentId = v +} +func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 { + if x != nil { + return x.Timestamp + } + return nil +} +func (x *GetSubTreeResponse_Body) SetTimestamp(v []uint64) { + x.Timestamp = v +} +func (x *GetSubTreeResponse_Body) GetMeta() []KeyValue { + if x != nil { + return x.Meta + } + return nil +} +func (x *GetSubTreeResponse_Body) SetMeta(v []KeyValue) { + x.Meta = v +} + +// MarshalJSON implements the json.Marshaler interface. 
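// Hypothetical sketch, not generated code: EmitProtobuf above writes one
// field occurrence per repeated element, and UnmarshalProtobuf appends one
// element per occurrence, so the parallel NodeId/ParentId/Timestamp slices
// keep their order and alignment across a round trip.
func exampleSubTreeBodyRoundTrip() error {
	in := new(GetSubTreeResponse_Body)
	in.SetNodeId([]uint64{1, 2})
	in.SetParentId([]uint64{0, 1})
	in.SetTimestamp([]uint64{10, 20})

	var out GetSubTreeResponse_Body
	if err := out.UnmarshalProtobuf(in.MarshalProtobuf(nil)); err != nil {
		return err
	}
	// out now mirrors in: NodeId [1 2], ParentId [0 1], Timestamp [10 20].
	return nil
}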
+func (x *GetSubTreeResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.NodeId { + if i != 0 { + out.RawByte(',') + } + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId[i], 10) + out.RawByte('"') + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.ParentId { + if i != 0 { + out.RawByte(',') + } + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId[i], 10) + out.RawByte('"') + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"timestamp\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Timestamp { + if i != 0 { + out.RawByte(',') + } + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp[i], 10) + out.RawByte('"') + } + out.RawByte(']') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Meta { + if i != 0 { + out.RawByte(',') + } + x.Meta[i].MarshalEasyJSON(out) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetSubTreeResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "nodeId": + { + var f uint64 + var list []uint64 + in.Delim('[') + for !in.IsDelim(']') { + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + list = append(list, f) + in.WantComma() + } + x.NodeId = list + in.Delim(']') + } + case "parentId": + { + var f uint64 + var list []uint64 + in.Delim('[') + for !in.IsDelim(']') { + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + list = append(list, f) + in.WantComma() + } + x.ParentId = list + in.Delim(']') + } + case "timestamp": + { + var f uint64 + var list []uint64 + in.Delim('[') + for !in.IsDelim(']') { + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + list = append(list, f) + in.WantComma() + } + x.Timestamp = list + in.Delim(']') + } + case "meta": + { + var f KeyValue + var list []KeyValue + in.Delim('[') + for !in.IsDelim(']') { + f = KeyValue{} + f.UnmarshalEasyJSON(in) + list = append(list, f) + in.WantComma() + } + x.Meta = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if 
isTopLevel { + in.Consumed() + } +} + +type GetSubTreeResponse struct { + Body *GetSubTreeResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetSubTreeResponse)(nil) + _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse)(nil) + _ json.Marshaler = (*GetSubTreeResponse)(nil) + _ json.Unmarshaler = (*GetSubTreeResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1172,27 +5885,6 @@ func (x *GetSubTreeResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetSubTreeResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1208,13 +5900,175 @@ func (x *GetSubTreeResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetSubTreeResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetSubTreeResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetSubTreeResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetSubTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetSubTreeResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetSubTreeResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetSubTreeResponse) SetBody(v *GetSubTreeResponse_Body) { + x.Body = v +} +func (x *GetSubTreeResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetSubTreeResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetSubTreeResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetSubTreeResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetSubTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetSubTreeResponse_Body + f = new(GetSubTreeResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TreeListRequest_Body struct { + ContainerId []byte `json:"containerId"` +} + +var ( + _ encoding.ProtoMarshaler = (*TreeListRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*TreeListRequest_Body)(nil) + _ json.Marshaler = (*TreeListRequest_Body)(nil) + _ json.Unmarshaler = (*TreeListRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1226,26 +6080,141 @@ func (x *TreeListRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. 
-// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TreeListRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TreeListRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TreeListRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *TreeListRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TreeListRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + } + } + return nil +} +func (x *TreeListRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *TreeListRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *TreeListRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *TreeListRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TreeListRequest struct { + Body *TreeListRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*TreeListRequest)(nil) + _ encoding.ProtoUnmarshaler = (*TreeListRequest)(nil) + _ json.Marshaler = (*TreeListRequest)(nil) + _ json.Unmarshaler = (*TreeListRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. 
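// Hypothetical sketch, not generated code: TreeListRequest_Body carries a
// single bytes field, so its wire form is just field 1 followed by the raw
// container ID, and the zero value encodes to zero bytes because
// EmitProtobuf skips empty fields.
func exampleTreeListBody() error {
	body := new(TreeListRequest_Body)
	body.SetContainerId([]byte{0xde, 0xad})

	data := body.MarshalProtobuf(nil) // 0x0a 0x02 0xde 0xad

	var decoded TreeListRequest_Body
	if err := decoded.UnmarshalProtobuf(data); err != nil {
		return err
	}
	// len(new(TreeListRequest_Body).MarshalProtobuf(nil)) == 0
	return nil
}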
@@ -1258,27 +6227,6 @@ func (x *TreeListRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TreeListRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1294,13 +6242,175 @@ func (x *TreeListRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *TreeListRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *TreeListRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TreeListRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TreeListRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *TreeListRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TreeListRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(TreeListRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *TreeListRequest) GetBody() *TreeListRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *TreeListRequest) SetBody(v *TreeListRequest_Body) { + x.Body = v +} +func (x *TreeListRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *TreeListRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *TreeListRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *TreeListRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TreeListRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *TreeListRequest_Body + f = new(TreeListRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TreeListResponse_Body struct { + Ids []string `json:"ids"` +} + +var ( + _ encoding.ProtoMarshaler = (*TreeListResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*TreeListResponse_Body)(nil) + _ json.Marshaler = (*TreeListResponse_Body)(nil) + _ json.Unmarshaler = (*TreeListResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1312,26 +6422,145 @@ func (x *TreeListResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TreeListResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.RepeatedStringMarshal(1, buf[offset:], x.Ids) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TreeListResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TreeListResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Ids { + mm.AppendString(1, x.Ids[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
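// Hypothetical sketch, not generated code: the repeated "ids" field is
// written as one length-delimited field-1 entry per string, and the decoder
// appends entries in wire order, so the slice round-trips unchanged.
func exampleTreeListIds() ([]string, error) {
	in := new(TreeListResponse_Body)
	in.SetIds([]string{"tree-a", "tree-b"})

	var out TreeListResponse_Body
	if err := out.UnmarshalProtobuf(in.MarshalProtobuf(nil)); err != nil {
		return nil, err
	}
	return out.GetIds(), nil // ["tree-a", "tree-b"]
}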
+func (x *TreeListResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TreeListResponse_Body") + } + switch fc.FieldNum { + case 1: // Ids + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Ids") + } + x.Ids = append(x.Ids, data) + } + } + return nil +} +func (x *TreeListResponse_Body) GetIds() []string { + if x != nil { + return x.Ids + } + return nil +} +func (x *TreeListResponse_Body) SetIds(v []string) { + x.Ids = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *TreeListResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ids\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Ids { + if i != 0 { + out.RawByte(',') + } + out.String(x.Ids[i]) + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *TreeListResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TreeListResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "ids": + { + var f string + var list []string + in.Delim('[') + for !in.IsDelim(']') { + f = in.String() + list = append(list, f) + in.WantComma() + } + x.Ids = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type TreeListResponse struct { + Body *TreeListResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*TreeListResponse)(nil) + _ encoding.ProtoUnmarshaler = (*TreeListResponse)(nil) + _ json.Marshaler = (*TreeListResponse)(nil) + _ json.Unmarshaler = (*TreeListResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1344,27 +6573,6 @@ func (x *TreeListResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *TreeListResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -1380,13 +6588,177 @@ func (x *TreeListResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *TreeListResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *TreeListResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *TreeListResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *TreeListResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *TreeListResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "TreeListResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(TreeListResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *TreeListResponse) GetBody() *TreeListResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *TreeListResponse) SetBody(v *TreeListResponse_Body) { + x.Body = v +} +func (x *TreeListResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *TreeListResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *TreeListResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
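// Hypothetical sketch, not generated code: the interface assertions above
// mean the standard encoding/json package transparently dispatches to the
// easyjson implementations, so plain json.Marshal/json.Unmarshal calls keep
// working for code that does not use easyjson directly.
func exampleStdlibJSON(resp *TreeListResponse) (*TreeListResponse, error) {
	data, err := json.Marshal(resp) // dispatches to resp.MarshalJSON
	if err != nil {
		return nil, err
	}
	decoded := new(TreeListResponse)
	if err := json.Unmarshal(data, decoded); err != nil { // dispatches to UnmarshalJSON
		return nil, err
	}
	return decoded, nil
}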
+func (x *TreeListResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *TreeListResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *TreeListResponse_Body + f = new(TreeListResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ApplyRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + Operation *LogMove `json:"operation"` +} + +var ( + _ encoding.ProtoMarshaler = (*ApplyRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ApplyRequest_Body)(nil) + _ json.Marshaler = (*ApplyRequest_Body)(nil) + _ json.Unmarshaler = (*ApplyRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1400,28 +6772,213 @@ func (x *ApplyRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ApplyRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.NestedStructureMarshal(3, buf[offset:], x.Operation) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ApplyRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ApplyRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if x.Operation != nil { + x.Operation.EmitProtobuf(mm.AppendMessage(3)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
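// Hypothetical sketch, not generated code (LogMove is defined elsewhere in
// this package): nested messages are emitted only when non-nil, and the
// decoder allocates Operation on demand when field 3 appears on the wire,
// so presence survives a round trip even for an empty nested message.
func exampleApplyBodyOperation() error {
	body := new(ApplyRequest_Body)
	body.SetTreeId("system")
	body.SetOperation(new(LogMove)) // empty but present: field 3 is emitted

	var decoded ApplyRequest_Body
	if err := decoded.UnmarshalProtobuf(body.MarshalProtobuf(nil)); err != nil {
		return err
	}
	// decoded.GetOperation() != nil, although the nested message is empty.
	return nil
}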
+func (x *ApplyRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ApplyRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // Operation + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Operation") + } + x.Operation = new(LogMove) + if err := x.Operation.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ApplyRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *ApplyRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *ApplyRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *ApplyRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *ApplyRequest_Body) GetOperation() *LogMove { + if x != nil { + return x.Operation + } + return nil +} +func (x *ApplyRequest_Body) SetOperation(v *LogMove) { + x.Operation = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ApplyRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"operation\":" + out.RawString(prefix) + x.Operation.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ApplyRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "operation": + { + var f *LogMove + f = new(LogMove) + f.UnmarshalEasyJSON(in) + x.Operation = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ApplyRequest struct { + Body *ApplyRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ApplyRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ApplyRequest)(nil) + _ json.Marshaler = (*ApplyRequest)(nil) + _ json.Unmarshaler = (*ApplyRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1434,27 +6991,6 @@ func (x *ApplyRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ApplyRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1470,13 +7006,174 @@ func (x *ApplyRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *ApplyRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ApplyRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ApplyRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ApplyRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ApplyRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ApplyRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ApplyRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ApplyRequest) GetBody() *ApplyRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ApplyRequest) SetBody(v *ApplyRequest_Body) { + x.Body = v +} +func (x *ApplyRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ApplyRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ApplyRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ApplyRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ApplyRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ApplyRequest_Body + f = new(ApplyRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ApplyResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*ApplyResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ApplyResponse_Body)(nil) + _ json.Marshaler = (*ApplyResponse_Body)(nil) + _ json.Unmarshaler = (*ApplyResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1487,18 +7184,93 @@ func (x *ApplyResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. 
-// -// Structures with the same field values have the same binary format. -func (x *ApplyResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ApplyResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ApplyResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ApplyResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ApplyResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ApplyResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ApplyResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ApplyResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ApplyResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ApplyResponse struct { + Body *ApplyResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ApplyResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ApplyResponse)(nil) + _ json.Marshaler = (*ApplyResponse)(nil) + _ json.Unmarshaler = (*ApplyResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1511,27 +7283,6 @@ func (x *ApplyResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *ApplyResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1547,13 +7298,178 @@ func (x *ApplyResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. 
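Because ApplyResponse_Body carries no fields, the codecs above degenerate to the minimum: EmitProtobuf writes nothing and MarshalEasyJSON writes only braces. A tiny illustration (not part of the patch; assumes "fmt" is imported):

	func emptyBodyForms() {
		b := new(ApplyResponse_Body)
		fmt.Println(len(b.MarshalProtobuf(nil))) // 0: no fields are emitted
		j, _ := b.MarshalJSON()
		fmt.Println(string(j)) // {}
	}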
func (x *ApplyResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *ApplyResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ApplyResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *ApplyResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ApplyResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ApplyResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ApplyResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ApplyResponse) GetBody() *ApplyResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ApplyResponse) SetBody(v *ApplyResponse_Body) { + x.Body = v +} +func (x *ApplyResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ApplyResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ApplyResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
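ReadSignedData hands back exactly GetBody().MarshalProtobuf(buf), so a signature covers the body bytes and nothing else. A sketch of how the pieces compose, with sign() as a hypothetical placeholder for the node's real signer (not an actual API from this patch):

	func signResponse(resp *ApplyResponse, sign func([]byte) (*Signature, error)) error {
		data, err := resp.ReadSignedData(nil) // protobuf bytes of the body only
		if err != nil {
			return err
		}
		sig, err := sign(data) // hypothetical helper
		if err != nil {
			return err
		}
		resp.SetSignature(sig)
		return nil
	}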
+func (x *ApplyResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ApplyResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ApplyResponse_Body + f = new(ApplyResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetOpLogRequest_Body struct { + ContainerId []byte `json:"containerId"` + TreeId string `json:"treeId"` + Height uint64 `json:"height"` + Count uint64 `json:"count"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetOpLogRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetOpLogRequest_Body)(nil) + _ json.Marshaler = (*GetOpLogRequest_Body)(nil) + _ json.Unmarshaler = (*GetOpLogRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1568,29 +7484,263 @@ func (x *GetOpLogRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetOpLogRequest_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId) - offset += proto.StringMarshal(2, buf[offset:], x.TreeId) - offset += proto.UInt64Marshal(3, buf[offset:], x.Height) - offset += proto.UInt64Marshal(4, buf[offset:], x.Count) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetOpLogRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetOpLogRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ContainerId) != 0 { + mm.AppendBytes(1, x.ContainerId) + } + if len(x.TreeId) != 0 { + mm.AppendString(2, x.TreeId) + } + if x.Height != 0 { + mm.AppendUint64(3, x.Height) + } + if x.Count != 0 { + mm.AppendUint64(4, x.Count) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *GetOpLogRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest_Body") + } + switch fc.FieldNum { + case 1: // ContainerId + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + case 2: // TreeId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "TreeId") + } + x.TreeId = data + case 3: // Height + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Height") + } + x.Height = data + case 4: // Count + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Count") + } + x.Count = data + } + } + return nil +} +func (x *GetOpLogRequest_Body) GetContainerId() []byte { + if x != nil { + return x.ContainerId + } + return nil +} +func (x *GetOpLogRequest_Body) SetContainerId(v []byte) { + x.ContainerId = v +} +func (x *GetOpLogRequest_Body) GetTreeId() string { + if x != nil { + return x.TreeId + } + return "" +} +func (x *GetOpLogRequest_Body) SetTreeId(v string) { + x.TreeId = v +} +func (x *GetOpLogRequest_Body) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} +func (x *GetOpLogRequest_Body) SetHeight(v uint64) { + x.Height = v +} +func (x *GetOpLogRequest_Body) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} +func (x *GetOpLogRequest_Body) SetCount(v uint64) { + x.Count = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetOpLogRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" + out.RawString(prefix) + out.String(x.TreeId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"height\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"count\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Count, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
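Worth noting the JSON shape the marshaler above produces: byte slices are base64-encoded and 64-bit integers are written as quoted strings, following the usual protojson convention. For instance (illustrative, assumes "fmt" is imported):

	func opLogRequestJSON() {
		b := &GetOpLogRequest_Body{TreeId: "version", Height: 5, Count: 10}
		data, _ := b.MarshalJSON()
		fmt.Println(string(data))
		// {"containerId":"","treeId":"version","height":"5","count":"10"}
	}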
+func (x *GetOpLogRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "containerId": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.ContainerId = f + } + case "treeId": + { + var f string + f = in.String() + x.TreeId = f + } + case "height": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.Height = f + } + case "count": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.Count = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetOpLogRequest struct { + Body *GetOpLogRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetOpLogRequest)(nil) + _ encoding.ProtoUnmarshaler = (*GetOpLogRequest)(nil) + _ json.Marshaler = (*GetOpLogRequest)(nil) + _ json.Unmarshaler = (*GetOpLogRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1603,27 +7753,6 @@ func (x *GetOpLogRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetOpLogRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1639,13 +7768,175 @@ func (x *GetOpLogRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetOpLogRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetOpLogRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
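On the decode side, height and count pass through json.Number before strconv.ParseUint, so both the quoted form written by the marshaler and bare JSON numbers should decode, assuming easyjson's lexer accepts both token kinds (it appears to); out-of-range input surfaces as a lexer error. A sketch under that assumption:

	func decodeOpLogRequest() error {
		var b GetOpLogRequest_Body
		// "height" quoted as the marshaler writes it, "count" bare for contrast.
		raw := []byte(`{"treeId":"version","height":"5","count":10}`)
		if err := b.UnmarshalJSON(raw); err != nil {
			return err
		}
		fmt.Println(b.Height, b.Count) // expected: 5 10
		return nil
	}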
+func (x *GetOpLogRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetOpLogRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetOpLogRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetOpLogRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetOpLogRequest) SetBody(v *GetOpLogRequest_Body) { + x.Body = v +} +func (x *GetOpLogRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetOpLogRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetOpLogRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *GetOpLogRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetOpLogRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetOpLogRequest_Body + f = new(GetOpLogRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetOpLogResponse_Body struct { + Operation *LogMove `json:"operation"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetOpLogResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*GetOpLogResponse_Body)(nil) + _ json.Marshaler = (*GetOpLogResponse_Body)(nil) + _ json.Unmarshaler = (*GetOpLogResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1657,26 +7948,135 @@ func (x *GetOpLogResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetOpLogResponse_Body) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Operation) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *GetOpLogResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetOpLogResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Operation != nil { + x.Operation.EmitProtobuf(mm.AppendMessage(1)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetOpLogResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse_Body") + } + switch fc.FieldNum { + case 1: // Operation + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Operation") + } + x.Operation = new(LogMove) + if err := x.Operation.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetOpLogResponse_Body) GetOperation() *LogMove { + if x != nil { + return x.Operation + } + return nil +} +func (x *GetOpLogResponse_Body) SetOperation(v *LogMove) { + x.Operation = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *GetOpLogResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"operation\":" + out.RawString(prefix) + x.Operation.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *GetOpLogResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetOpLogResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "operation": + { + var f *LogMove + f = new(LogMove) + f.UnmarshalEasyJSON(in) + x.Operation = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type GetOpLogResponse struct { + Body *GetOpLogResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*GetOpLogResponse)(nil) + _ encoding.ProtoUnmarshaler = (*GetOpLogResponse)(nil) + _ json.Marshaler = (*GetOpLogResponse)(nil) + _ json.Unmarshaler = (*GetOpLogResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1689,27 +8089,6 @@ func (x *GetOpLogResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *GetOpLogResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1725,13 +8104,174 @@ func (x *GetOpLogResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *GetOpLogResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *GetOpLogResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *GetOpLogResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *GetOpLogResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *GetOpLogResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(GetOpLogResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *GetOpLogResponse) SetBody(v *GetOpLogResponse_Body) { + x.Body = v +} +func (x *GetOpLogResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *GetOpLogResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *GetOpLogResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
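For context, this response type is what the op-log stream yields one message at a time. An illustrative consumer, using the TreeServiceClient from service_grpc.pb.go further below (assumes conn is an established *grpc.ClientConn, req is already filled and signed, and "context", "errors", "io", and "google.golang.org/grpc" are imported):

	func drainOpLog(ctx context.Context, conn *grpc.ClientConn, req *GetOpLogRequest) error {
		stream, err := NewTreeServiceClient(conn).GetOpLog(ctx, req)
		if err != nil {
			return err
		}
		for {
			resp, err := stream.Recv()
			if errors.Is(err, io.EOF) {
				return nil // server finished replaying the log
			}
			if err != nil {
				return err
			}
			_ = resp.GetBody().GetOperation() // one replicated LogMove per message
		}
	}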
+func (x *GetOpLogResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *GetOpLogResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *GetOpLogResponse_Body + f = new(GetOpLogResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthcheckResponse_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*HealthcheckResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*HealthcheckResponse_Body)(nil) + _ json.Marshaler = (*HealthcheckResponse_Body)(nil) + _ json.Unmarshaler = (*HealthcheckResponse_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1742,18 +8282,93 @@ func (x *HealthcheckResponse_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthcheckResponse_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthcheckResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthcheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthcheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthcheckResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthcheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *HealthcheckResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthcheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthcheckResponse struct { + Body *HealthcheckResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthcheckResponse)(nil) + _ encoding.ProtoUnmarshaler = (*HealthcheckResponse)(nil) + _ json.Marshaler = (*HealthcheckResponse)(nil) + _ json.Unmarshaler = (*HealthcheckResponse)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1766,27 +8381,6 @@ func (x *HealthcheckResponse) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthcheckResponse) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. // @@ -1802,13 +8396,174 @@ func (x *HealthcheckResponse) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *HealthcheckResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *HealthcheckResponse) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthcheckResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthcheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *HealthcheckResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(HealthcheckResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *HealthcheckResponse) SetBody(v *HealthcheckResponse_Body) { + x.Body = v +} +func (x *HealthcheckResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *HealthcheckResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthcheckResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *HealthcheckResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthcheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *HealthcheckResponse_Body + f = new(HealthcheckResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthcheckRequest_Body struct { +} + +var ( + _ encoding.ProtoMarshaler = (*HealthcheckRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*HealthcheckRequest_Body)(nil) + _ json.Marshaler = (*HealthcheckRequest_Body)(nil) + _ json.Unmarshaler = (*HealthcheckRequest_Body)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1819,18 +8574,93 @@ func (x *HealthcheckRequest_Body) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. 
-// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthcheckRequest_Body) StableMarshal(buf []byte) []byte { - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthcheckRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *HealthcheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthcheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest_Body") + } + switch fc.FieldNum { + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthcheckRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthcheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + out.RawByte('{') + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *HealthcheckRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthcheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type HealthcheckRequest struct { + Body *HealthcheckRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*HealthcheckRequest)(nil) + _ encoding.ProtoUnmarshaler = (*HealthcheckRequest)(nil) + _ json.Marshaler = (*HealthcheckRequest)(nil) + _ json.Unmarshaler = (*HealthcheckRequest)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -1843,27 +8673,6 @@ func (x *HealthcheckRequest) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *HealthcheckRequest) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body) - offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature) - return buf -} - // ReadSignedData fills buf with signed data of x. // If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
// @@ -1879,9 +8688,160 @@ func (x *HealthcheckRequest) SignedDataSize() int { // // Structures with the same field values have the same signed data size. func (x *HealthcheckRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().StableMarshal(buf), nil + return x.GetBody().MarshalProtobuf(buf), nil } -func (x *HealthcheckRequest) SetSignature(sig *Signature) { - x.Signature = sig +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *HealthcheckRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *HealthcheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *HealthcheckRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(HealthcheckRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *HealthcheckRequest) SetBody(v *HealthcheckRequest_Body) { + x.Body = v +} +func (x *HealthcheckRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *HealthcheckRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *HealthcheckRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
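The healthcheck pair rounds out the message set: empty bodies inside the usual signed envelope. A minimal liveness probe sketch (illustrative; request signing is elided here, and conn is assumed to be an established *grpc.ClientConn):

	func probe(ctx context.Context, conn *grpc.ClientConn) error {
		_, err := NewTreeServiceClient(conn).Healthcheck(ctx, new(HealthcheckRequest))
		return err
	}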
+func (x *HealthcheckRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *HealthcheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *HealthcheckRequest_Body + f = new(HealthcheckRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } } diff --git a/pkg/services/tree/service_grpc.pb.go b/pkg/services/tree/service_grpc.pb.go index 4c293a4c0..63f96e11a 100644 --- a/pkg/services/tree/service_grpc.pb.go +++ b/pkg/services/tree/service_grpc.pb.go @@ -3,7 +3,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc v4.25.0 // source: pkg/services/tree/service.proto @@ -18,8 +18,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 const ( TreeService_Add_FullMethodName = "/tree.TreeService/Add" @@ -70,9 +70,8 @@ func NewTreeServiceClient(cc grpc.ClientConnInterface) TreeServiceClient { } func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AddResponse) - err := c.cc.Invoke(ctx, TreeService_Add_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_Add_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -80,9 +79,8 @@ func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grp } func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest, opts ...grpc.CallOption) (*AddByPathResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AddByPathResponse) - err := c.cc.Invoke(ctx, TreeService_AddByPath_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_AddByPath_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -90,9 +88,8 @@ func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest, } func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RemoveResponse) - err := c.cc.Invoke(ctx, TreeService_Remove_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_Remove_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -100,9 +97,8 @@ func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts } func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...grpc.CallOption) (*MoveResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(MoveResponse) - err := c.cc.Invoke(ctx, TreeService_Move_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_Move_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -110,9 +106,8 @@ func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...g } func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPathRequest, opts ...grpc.CallOption) (*GetNodeByPathResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetNodeByPathResponse) - err := c.cc.Invoke(ctx, TreeService_GetNodeByPath_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_GetNodeByPath_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -120,12 +115,11 @@ func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPath } func (c *treeServiceClient) GetSubTree(ctx context.Context, in *GetSubTreeRequest, opts ...grpc.CallOption) (TreeService_GetSubTreeClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], TreeService_GetSubTree_FullMethodName, cOpts...) + stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], TreeService_GetSubTree_FullMethodName, opts...) if err != nil { return nil, err } - x := &treeServiceGetSubTreeClient{ClientStream: stream} + x := &treeServiceGetSubTreeClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -153,9 +147,8 @@ func (x *treeServiceGetSubTreeClient) Recv() (*GetSubTreeResponse, error) { } func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, opts ...grpc.CallOption) (*TreeListResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TreeListResponse) - err := c.cc.Invoke(ctx, TreeService_TreeList_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_TreeList_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -163,9 +156,8 @@ func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, o } func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ApplyResponse) - err := c.cc.Invoke(ctx, TreeService_Apply_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_Apply_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -173,12 +165,11 @@ func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts .. } func (c *treeServiceClient) GetOpLog(ctx context.Context, in *GetOpLogRequest, opts ...grpc.CallOption) (TreeService_GetOpLogClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], TreeService_GetOpLog_FullMethodName, cOpts...) + stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], TreeService_GetOpLog_FullMethodName, opts...) 
if err != nil { return nil, err } - x := &treeServiceGetOpLogClient{ClientStream: stream} + x := &treeServiceGetOpLogClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -206,9 +197,8 @@ func (x *treeServiceGetOpLogClient) Recv() (*GetOpLogResponse, error) { } func (c *treeServiceClient) Healthcheck(ctx context.Context, in *HealthcheckRequest, opts ...grpc.CallOption) (*HealthcheckResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(HealthcheckResponse) - err := c.cc.Invoke(ctx, TreeService_Healthcheck_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, TreeService_Healthcheck_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -383,7 +373,7 @@ func _TreeService_GetSubTree_Handler(srv interface{}, stream grpc.ServerStream) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(TreeServiceServer).GetSubTree(m, &treeServiceGetSubTreeServer{ServerStream: stream}) + return srv.(TreeServiceServer).GetSubTree(m, &treeServiceGetSubTreeServer{stream}) } type TreeService_GetSubTreeServer interface { @@ -440,7 +430,7 @@ func _TreeService_GetOpLog_Handler(srv interface{}, stream grpc.ServerStream) er if err := stream.RecvMsg(m); err != nil { return err } - return srv.(TreeServiceServer).GetOpLog(m, &treeServiceGetOpLogServer{ServerStream: stream}) + return srv.(TreeServiceServer).GetOpLog(m, &treeServiceGetOpLogServer{stream}) } type TreeService_GetOpLogServer interface { diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 58cab659f..8221a4546 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -8,19 +8,17 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "go.uber.org/zap" ) type message interface { @@ -30,16 +28,7 @@ type message interface { SetSignature(*Signature) } -func basicACLErr(op acl.Op) error { - return fmt.Errorf("access to operation %s is denied by basic ACL check", op) -} - -func eACLErr(op eacl.Operation, err error) error { - return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err) -} - var ( - errBearerWrongOwner = errors.New("bearer token must be signed by the container owner") errBearerWrongContainer = errors.New("bearer token is created for another container") errBearerSignature = errors.New("invalid bearer token signature") ) @@ -49,7 +38,7 @@ var ( // Operation must be one of: // - 1. ObjectPut; // - 2. ObjectGet. 
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error { +func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error { err := verifyMessage(req) if err != nil { return err @@ -60,16 +49,14 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return err } - cnr, err := s.cnrSource.Get(cid) + cnr, err := s.cnrSource.Get(ctx, cid) if err != nil { return fmt.Errorf("can't get container %s: %w", cid, err) } - eaclOp := eACLOp(op) - - bt, err := parseBearer(rawBearer, cid, eaclOp) + bt, err := parseBearer(rawBearer, cid) if err != nil { - return err + return fmt.Errorf("access to operation %s is denied: %w", op, err) } role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt) @@ -77,56 +64,22 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - basicACL := cnr.Value.BasicACL() - // Basic ACL mask can be unset, if a container operations are performed - // with strict APE checks only. - // - // FIXME(@aarifullin): tree service temporiraly performs APE checks on - // object verbs, because tree verbs have not been introduced yet. - if basicACL == 0x0 { - return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey) + if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil { + return apeErr(err) } + return nil +} - if !basicACL.IsOpAllowed(op, role) { - return basicACLErr(op) +func apeErr(err error) error { + var chRouterErr *checkercore.ChainRouterError + if !errors.As(err, &chRouterErr) { + errServerInternal := &apistatus.ServerInternal{} + apistatus.WriteInternalServerErr(errServerInternal, err) + return errServerInternal } - - if !basicACL.Extendable() { - return nil - } - - var useBearer bool - if len(rawBearer) != 0 { - if !basicACL.AllowedBearerRules(op) { - s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL, - zap.String("cid", cid.EncodeToString()), - zap.Stringer("op", op), - ) - } else { - useBearer = true - } - } - - var tb eacl.Table - signer := req.GetSignature().GetKey() - if useBearer && !bt.Impersonate() { - if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) { - return eACLErr(eaclOp, errBearerWrongOwner) - } - tb = bt.EACLTable() - } else { - tbCore, err := s.eaclSource.GetEACL(cid) - if err != nil { - return handleGetEACLError(err) - } - tb = *tbCore.Value - - if useBearer && bt.Impersonate() { - signer = bt.SigningKeyBytes() - } - } - - return checkEACL(tb, signer, eACLRole(role), eaclOp) + errAccessDenied := &apistatus.ObjectAccessDenied{} + errAccessDenied.WriteReason(err.Error()) + return errAccessDenied } // Returns true iff the operation is read-only and request was signed @@ -142,40 +95,32 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { } key := sign.GetKey() - for i := range s.authorizedKeys { - if bytes.Equal(s.authorizedKeys[i], key) { + for _, currentKey := range *s.authorizedKeys.Load() { + if bytes.Equal(currentKey, key) { return true, nil } } return false, nil } -func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*bearer.Token, error) { +func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) { if len(rawBearer) == 0 { return nil, nil } bt := new(bearer.Token) if err := bt.Unmarshal(rawBearer); err != nil { - return nil, eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err)) + return nil, fmt.Errorf("invalid bearer token: %w", err) 
} if !bt.AssertContainer(cid) { - return nil, eACLErr(eaclOp, errBearerWrongContainer) + return nil, errBearerWrongContainer } if !bt.VerifySignature() { - return nil, eACLErr(eaclOp, errBearerSignature) + return nil, errBearerSignature } return bt, nil } -func handleGetEACLError(err error) error { - if client.IsErrEACLNotFound(err) { - return nil - } - - return fmt.Errorf("get eACL table: %w", err) -} - func verifyMessage(m message) error { binBody, err := m.ReadSignedData(nil) if err != nil { @@ -249,84 +194,3 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a return role, pub, nil } - -func eACLOp(op acl.Op) eacl.Operation { - switch op { - case acl.OpObjectGet: - return eacl.OperationGet - case acl.OpObjectPut: - return eacl.OperationPut - default: - panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op)) - } -} - -func eACLRole(role acl.Role) eacl.Role { - switch role { - case acl.RoleOwner: - return eacl.RoleUser - case acl.RoleOthers: - return eacl.RoleOthers - default: - panic(fmt.Sprintf("unexpected tree service ACL role: %s", role)) - } -} - -var ( - errDENY = errors.New("DENY eACL rule") - errNoAllowRules = errors.New("not found allowing rules for the request") -) - -// checkEACL searches for the eACL rules that could be applied to the request -// (a tuple of a signer key, his FrostFS role and a request operation). -// It does not filter the request by the filters of the eACL table since tree -// requests do not contain any "object" information that could be filtered and, -// therefore, filtering leads to unexpected results. -// The code was copied with the minor updates from the SDK repo: -// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28. 
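// Editor's note: checkEACL and targetMatches below are deleted; under APE
// the same allow/deny decision is expressed as chain rules evaluated by the
// policy engine. A minimal, hedged sketch of an equivalent "allow GET for
// one public key" rule, using the policy-engine types this patch imports
// elsewhere (values mirror the testChain helper added below; signerPubKey
// is a hypothetical []byte, not part of the patch):
allowGet := chain.Rule{
	Status:    chain.Allow,
	Actions:   chain.Actions{Names: []string{native.MethodGetObject}},
	Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
	Any:       true,
	Condition: []chain.Condition{{
		Op:    chain.CondStringEquals,
		Kind:  chain.KindRequest,
		Key:   native.PropertyKeyActorPublicKey,
		Value: hex.EncodeToString(signerPubKey), // hypothetical key bytes
	}},
}
_ = allowGet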
-func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error { - for _, record := range tb.Records() { - // check type of operation - if record.Operation() != op { - continue - } - - // check target - if !targetMatches(record, role, signer) { - continue - } - - switch a := record.Action(); a { - case eacl.ActionAllow: - return nil - case eacl.ActionDeny: - return eACLErr(op, errDENY) - default: - return eACLErr(op, fmt.Errorf("unexpected action: %s", a)) - } - } - - return eACLErr(op, errNoAllowRules) -} - -func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool { - for _, target := range rec.Targets() { - // check public key match - if pubs := target.BinaryKeys(); len(pubs) != 0 { - for _, key := range pubs { - if bytes.Equal(key, signer) { - return true - } - } - - continue - } - - // check target group match - if role == target.Role() { - return true - } - } - - return false -} diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index ce5039f7c..8815c227f 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -4,32 +4,70 @@ import ( "context" "crypto/ecdsa" "crypto/sha256" + "encoding/hex" "errors" "testing" - aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" + "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + aclV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" + "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" "github.com/stretchr/testify/require" ) +const versionTreeID = "version" + type dummyNetmapSource struct { netmap.Source } +type dummySubjectProvider struct { + subjects map[util.Uint160]client.SubjectExtended +} + +func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { + res := s.subjects[addr] + return &client.Subject{ + PrimaryKey: res.PrimaryKey, + AdditionalKeys: res.AdditionalKeys, + Namespace: res.Namespace, + Name: res.Name, + KV: res.KV, + }, nil +} + +func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { + res := s.subjects[addr] + return &res, nil +} + +type dummyEpochSource struct { + epoch uint64 +} + +func (s dummyEpochSource) CurrentEpoch() uint64 { + return s.epoch +} + type dummyContainerSource map[string]*containercore.Container -func (s dummyContainerSource) List() ([]cid.ID, error) { +func (s 
dummyContainerSource) List(context.Context) ([]cid.ID, error) { res := make([]cid.ID, 0, len(s)) var cnr cid.ID @@ -45,7 +83,7 @@ func (s dummyContainerSource) List() ([]cid.ID, error) { return res, nil } -func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { +func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) { cnt, ok := s[id.String()] if !ok { return nil, errors.New("container not found") @@ -53,20 +91,10 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { return cnt, nil } -func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) { +func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) { return &containercore.DelInfo{}, nil } -type dummyEACLSource map[string]*containercore.EACL - -func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) { - cntEACL, ok := s[id.String()] - if !ok { - return nil, errors.New("container not found") - } - return cntEACL, nil -} - func testContainer(owner user.ID) container.Container { var r netmapSDK.ReplicaDescriptor r.SetNumberOfObjects(1) @@ -81,6 +109,8 @@ func testContainer(owner user.ID) container.Container { return cnt } +const currentEpoch = 123 + func TestMessageSign(t *testing.T) { privs := make([]*keys.PrivateKey, 4) for i := range privs { @@ -99,6 +129,15 @@ func TestMessageSign(t *testing.T) { Value: testContainer(ownerID), } + e := inmemory.NewInMemoryLocalOverrides() + e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{ + Type: engine.Container, + Name: cid1.EncodeToString(), + }, testChain(privs[0].PublicKey(), privs[1].PublicKey())) + frostfsidProvider := dummySubjectProvider{ + subjects: make(map[util.Uint160]client.SubjectExtended), + } + s := &Service{ cfg: cfg{ log: test.NewLogger(t), @@ -107,14 +146,13 @@ func TestMessageSign(t *testing.T) { cnrSource: dummyContainerSource{ cid1.String(): cnr, }, - eaclSource: dummyEACLSource{ - cid1.String(): &containercore.EACL{ - Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()), - }, - }, + frostfsidSubjectProvider: frostfsidProvider, + state: dummyEpochSource{epoch: currentEpoch}, }, + apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), } + s.cfg.authorizedKeys.Store(&[][]byte{}) rawCID1 := make([]byte, sha256.Size) cid1.Encode(rawCID1) @@ -123,7 +161,7 @@ func TestMessageSign(t *testing.T) { ContainerId: rawCID1, ParentId: 1, NodeId: 2, - Meta: []*KeyValue{ + Meta: []KeyValue{ {Key: "kkk", Value: []byte("vvv")}, }, }, @@ -133,26 +171,26 @@ func TestMessageSign(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRW) t.Run("missing signature, no panic", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) t.Run("invalid CID", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) cnr.Value.SetBasicACL(acl.Private) t.Run("extension disabled", func(t *testing.T) { require.NoError(t, SignMessage(req, 
&privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) t.Run("invalid key", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) }) t.Run("bearer", func(t *testing.T) { @@ -165,7 +203,7 @@ func TestMessageSign(t *testing.T) { t.Run("invalid bearer", func(t *testing.T) { req.Body.BearerToken = []byte{0xFF} require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer CID", func(t *testing.T) { @@ -174,7 +212,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer owner", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -182,7 +220,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer signature", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -194,20 +232,112 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bv2.StableMarshal(nil) require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + }) + + t.Run("omit override within bt", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + bt.SetImpersonate(true) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + }) + }) + + t.Run("invalid override within bearer token", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + 
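// Editor's note on naming in these sub-tests: "personated" means the token
// is used as-is, so the request is signed by the token holder (privs[1])
// while the container owner (privs[0]) signs the token; with
// SetImpersonate(true) the issuer acts on behalf of the request, so the
// request must be signed by the same key that signed the token (privs[0]).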
require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + bt.SetImpersonate(true) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) }) t.Run("impersonate", func(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRWExtended) var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + + t.Run("impersonate, but target user is still set", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) bt.SetImpersonate(true) + var reqSigner user.ID + user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey())) + + bt.ForUser(reqSigner) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + + t.Run("impersonate but invalid signer", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) require.NoError(t, bt.Sign(privs[1].PrivateKey)) req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -217,64 +347,95 @@ func TestMessageSign(t *testing.T) { t.Run("put and get", func(t *testing.T) { require.NoError(t, SignMessage(req, 
&privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("only get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[2].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("none", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[3].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) }) } func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token { var b bearer.Token - b.SetEACLTable(*testTable(cid, forPutGet, forGet)) + b.SetExp(currentEpoch + 1) + b.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid.EncodeToString(), + }, + Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, + }) return b } -func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table { - tgtGet := eaclSDK.NewTarget() - tgtGet.SetRole(eaclSDK.RoleUnknown) - tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()}) +func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + b.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + }, + Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, + }) - rGet := eaclSDK.NewRecord() - rGet.SetAction(eaclSDK.ActionAllow) - rGet.SetOperation(eaclSDK.OperationGet) - rGet.SetTargets(*tgtGet) + return b +} - tgtPut := eaclSDK.NewTarget() - tgtPut.SetRole(eaclSDK.RoleUnknown) - tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()}) +func testBearerTokenNoOverride() bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + return b +} - rPut := eaclSDK.NewRecord() - rPut.SetAction(eaclSDK.ActionAllow) - rPut.SetOperation(eaclSDK.OperationPut) - rPut.SetTargets(*tgtPut) - - tb := eaclSDK.NewTable() - tb.AddRecord(rGet) - tb.AddRecord(rPut) - - tgt := eaclSDK.NewTarget() - tgt.SetRole(eaclSDK.RoleOthers) - - for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} { - r := eaclSDK.NewRecord() - r.SetAction(eaclSDK.ActionDeny) - r.SetTargets(*tgt) - r.SetOperation(op) - tb.AddRecord(r) 
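// The helpers above replace the old eACL-table bearer tokens with APE
// overrides. A hedged usage sketch built only from calls this patch already
// makes (cnrID, someChain and issuerKey are hypothetical placeholders):
var b bearer.Token
b.SetExp(currentEpoch + 1) // the token must not be expired when evaluated
b.SetAPEOverride(bearer.APEOverride{
	Target: ape.ChainTarget{
		TargetType: ape.TargetTypeContainer,
		Name:       cnrID.EncodeToString(), // cnrID: a cid.ID
	},
	Chains: []ape.Chain{{Raw: someChain.Bytes()}}, // someChain: *chain.Chain
})
if err := b.Sign(issuerKey); err != nil { // issuerKey: ecdsa.PrivateKey
	panic(err)
}
raw := b.Marshal() // travels in the request body as BearerToken
_ = raw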
+func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { + ruleGet := chain.Rule{ + Status: chain.Allow, + Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}}, + Actions: chain.Actions{Names: []string{native.MethodGetObject}}, + Any: true, + Condition: []chain.Condition{ + { + Op: chain.CondStringEquals, + Kind: chain.KindRequest, + Key: native.PropertyKeyActorPublicKey, + Value: hex.EncodeToString(forPutGet.Bytes()), + }, + { + Op: chain.CondStringEquals, + Kind: chain.KindRequest, + Key: native.PropertyKeyActorPublicKey, + Value: hex.EncodeToString(forGet.Bytes()), + }, + }, + } + rulePut := chain.Rule{ + Status: chain.Allow, + Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}}, + Actions: chain.Actions{Names: []string{native.MethodPutObject}}, + Any: true, + Condition: []chain.Condition{ + { + Op: chain.CondStringEquals, + Kind: chain.KindRequest, + Key: native.PropertyKeyActorPublicKey, + Value: hex.EncodeToString(forPutGet.Bytes()), + }, + }, } - tb.SetCID(cid) - - return tb + return &chain.Chain{ + Rules: []chain.Rule{ + ruleGet, + rulePut, + }, + } } diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 0f85f50b1..af355639f 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -2,7 +2,9 @@ package tree import ( "context" + "crypto/ecdsa" "crypto/sha256" + "crypto/tls" "errors" "fmt" "io" @@ -13,6 +15,8 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -20,12 +24,15 @@ import ( metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -39,7 +46,7 @@ const defaultSyncWorkerCount = 20 // tree IDs from the other container nodes. Returns ErrNotInContainer if the node // is not included in the container. 
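// Editor's note: the synchronization path below is reworked in this patch.
// Container lookups now take a context, per-node operation streams are
// merged by time in mergeOperationStreams (which now respects ctx.Done()),
// and the merged stream is applied in fixed-size batches instead of the
// previous unbounded errgroup fan-out.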
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { - nodes, pos, err := s.getContainerNodes(cid) + nodes, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -71,8 +78,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { var treesToSync []string var outErr error - err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool { - resp, outErr = c.TreeList(ctx, req) + err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = c.TreeList(fCtx, req) if outErr != nil { return false } @@ -92,7 +99,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { for _, tid := range treesToSync { h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid) if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree, + s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree, zap.Stringer("cid", cid), zap.String("tree", tid)) continue @@ -100,7 +107,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes) if h < newHeight { if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil { - s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree, + s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree, zap.Stringer("cid", cid), zap.String("tree", tid)) } @@ -112,7 +119,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { // SynchronizeTree tries to synchronize log starting from the last stored height. func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error { - nodes, pos, err := s.getContainerNodes(cid) + nodes, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -131,14 +138,9 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string } // mergeOperationStreams performs merge sort for node operation streams to one stream. -func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { +func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { defer close(merged) - ms := make([]*pilorama.Move, len(streams)) - for i := range streams { - ms[i] = <-streams[i] - } - // Merging different node streams shuffles incoming operations like that: // // x - operation from the stream A @@ -150,6 +152,15 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram // operation height from the stream B. This height is stored in minStreamedLastHeight. 
var minStreamedLastHeight uint64 = math.MaxUint64 + ms := make([]*pilorama.Move, len(streams)) + for i := range streams { + select { + case ms[i] = <-streams[i]: + case <-ctx.Done(): + return minStreamedLastHeight + } + } + for { var minTimeMoveTime uint64 = math.MaxUint64 minTimeMoveIndex := -1 @@ -164,7 +175,11 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram break } - merged <- ms[minTimeMoveIndex] + select { + case merged <- ms[minTimeMoveIndex]: + case <-ctx.Done(): + return minStreamedLastHeight + } height := ms[minTimeMoveIndex].Time if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil { minStreamedLastHeight = min(minStreamedLastHeight, height) @@ -176,40 +191,30 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string, operationStream <-chan *pilorama.Move, -) uint64 { - errGroup, _ := errgroup.WithContext(ctx) - const workersCount = 1024 - errGroup.SetLimit(workersCount) - - // We run TreeApply concurrently for the operation batch. Let's consider two operations - // in the batch m1 and m2 such that m1.Time < m2.Time. The engine may apply m2 and fail - // on m1. That means the service must start sync from m1.Time in the next iteration and - // this height is stored in unappliedOperationHeight. - var unappliedOperationHeight uint64 = math.MaxUint64 - var heightMtx sync.Mutex - +) (uint64, error) { var prev *pilorama.Move + var batch []*pilorama.Move for m := range operationStream { - m := m - // skip already applied op if prev != nil && prev.Time == m.Time { continue } prev = m + batch = append(batch, m) - errGroup.Go(func() error { - if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil { - heightMtx.Lock() - unappliedOperationHeight = min(unappliedOperationHeight, m.Time) - heightMtx.Unlock() - return err + if len(batch) == s.syncBatchSize { + if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { + return batch[0].Time, err } - return nil - }) + batch = batch[:0] + } } - _ = errGroup.Wait() - return unappliedOperationHeight + if len(batch) > 0 { + if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { + return batch[0].Time, err + } + } + return math.MaxUint64, nil } func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, @@ -242,10 +247,14 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, Parent: lm.GetParentId(), Child: lm.GetChildId(), } - if err := m.Meta.FromBytes(lm.GetMeta()); err != nil { + if err := m.FromBytes(lm.GetMeta()); err != nil { return err } - opsCh <- m + select { + case opsCh <- m: + case <-ctx.Done(): + return ctx.Err() + } } if !errors.Is(err, io.EOF) { return err @@ -261,7 +270,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, treeID string, nodes []netmapSDK.NodeInfo, ) uint64 { - s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from)) + s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from)) errGroup, egCtx := errgroup.WithContext(ctx) const workersCount = 1024 @@ -274,44 +283,43 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, merged := make(chan *pilorama.Move) var minStreamedLastHeight uint64 
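// The batch-apply step (applyOperationStream above) distills to this
// pattern: flush every n items and once more when the channel closes. A
// hedged sketch, not the production code path:
applyBatched := func(in <-chan *pilorama.Move, n int, flush func([]*pilorama.Move) error) error {
	batch := make([]*pilorama.Move, 0, n)
	for m := range in {
		batch = append(batch, m)
		if len(batch) == n { // full batch: flush, then reuse the backing array
			if err := flush(batch); err != nil {
				return err
			}
			batch = batch[:0]
		}
	}
	if len(batch) > 0 { // tail batch left over after the stream closes
		return flush(batch)
	}
	return nil
}
_ = applyBatched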
errGroup.Go(func() error { - minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged) + minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged) return nil }) var minUnappliedHeight uint64 errGroup.Go(func() error { - minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged) - return nil + var err error + minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged) + return err }) var allNodesSynced atomic.Bool allNodesSynced.Store(true) for i, n := range nodes { - i := i - n := n errGroup.Go(func() error { var nodeSynced bool - n.IterateNetworkEndpoints(func(addr string) bool { + for addr := range n.NetworkEndpoints() { var a network.Address if err := a.FromString(addr); err != nil { - s.log.Warn(logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) + continue } - cc, err := s.dialCtx(egCtx, a) + cc, err := dialTreeService(ctx, a, s.key, s.ds) if err != nil { - s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) + continue } - defer cc.Close() err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) if err != nil { - s.log.Warn(logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) + s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) } nodeSynced = err == nil - return true - }) + _ = cc.Close() + break + } close(nodeOperationStreams[i]) if !nodeSynced { allNodesSynced.Store(false) @@ -321,7 +329,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, } if err := errGroup.Wait(); err != nil { allNodesSynced.Store(false) - s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err)) + s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err)) } newHeight := minStreamedLastHeight @@ -336,17 +344,60 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } -func (*Service) dialCtx(egCtx context.Context, a network.Address) (*grpc.ClientConn, error) { - return grpc.DialContext(egCtx, a.URIAddr(), +func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) { + cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer())) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) + defer cancel() + + req := &HealthcheckRequest{ + Body: &HealthcheckRequest_Body{}, + } + if err := SignMessage(req, key); err != nil { + return nil, err + } + + // perform some request to check connection + if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { + _ = cc.Close() + return nil, err + } + return cc, nil +} + +func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + host, isTLS, err := client.ParseURI(a.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + + defaultOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( + 
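// Editorial assumption, not stated by the patch: the IO-tag adjuster is
// placed first in each chain so that the metrics and tracing interceptors
// observe calls that already carry the qos tag; the stream chain below
// mirrors the same order.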
qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing_grpc.NewUnaryClientInteceptor(), + tracing_grpc.NewUnaryClientInterceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing_grpc.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), - grpc.WithTransportCredentials(insecure.NewCredentials())) + grpc.WithTransportCredentials(creds), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), + } + + return grpc.NewClient(host, append(defaultOpts, opts...)...) } // ErrAlreadySyncing is returned when a service synchronization has already @@ -386,25 +437,25 @@ func (s *Service) syncLoop(ctx context.Context) { return case <-s.syncChan: ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync") - s.log.Debug(logs.TreeSyncingTrees) + s.log.Info(ctx, logs.TreeSyncingTrees) start := time.Now() - cnrs, err := s.cfg.cnrSource.List() + cnrs, err := s.cnrSource.List(ctx) if err != nil { - s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err)) + s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) span.End() break } - newMap, cnrsToSync := s.containersToSync(cnrs) + newMap, cnrsToSync := s.containersToSync(ctx, cnrs) s.syncContainers(ctx, cnrsToSync) s.removeContainers(ctx, newMap) - s.log.Debug(logs.TreeTreesHaveBeenSynchronized) + s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized) s.metrics.AddSyncDuration(time.Since(start), true) span.End() @@ -421,22 +472,22 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) { var wg sync.WaitGroup for _, cnr := range cnrs { wg.Add(1) - cnr := cnr + err := s.syncPool.Submit(func() { defer wg.Done() - s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr)) + s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr)) err := s.synchronizeAllTrees(ctx, cnr) if err != nil { - s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err)) + s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err)) return } - s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr)) + s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr)) }) if err != nil { wg.Done() - s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization, + s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization, zap.Stringer("cid", cnr), zap.Error(err)) if errors.Is(err, ants.ErrPoolClosed) { @@ -460,9 +511,9 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID continue } - existed, err := containerCore.WasRemoved(s.cnrSource, cnr) + existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr) if err != nil { - s.log.Error(logs.TreeCouldNotCheckIfContainerExisted, + s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted, zap.Stringer("cid", cnr), zap.Error(err)) } else if existed { @@ -474,25 +525,25 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID } for _, cnr := range removed { - s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr)) + s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr)) err := s.DropTree(ctx, cnr, "") if err != nil { - s.log.Error(logs.TreeCouldNotRemoveRedundantTree, + s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree, 
zap.Stringer("cid", cnr), zap.Error(err)) } } } -func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) { +func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) { newMap := make(map[cid.ID]struct{}, len(s.cnrMap)) cnrsToSync := make([]cid.ID, 0, len(cnrs)) for _, cnr := range cnrs { - _, pos, err := s.getContainerNodes(cnr) + _, pos, err := s.getContainerNodes(ctx, cnr) if err != nil { - s.log.Error(logs.TreeCouldNotCalculateContainerNodes, + s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes, zap.Stringer("cid", cnr), zap.Error(err)) continue diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go index 190b4ccbb..87d419408 100644 --- a/pkg/services/tree/sync_test.go +++ b/pkg/services/tree/sync_test.go @@ -1,6 +1,7 @@ package tree import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -51,8 +52,6 @@ func Test_mergeOperationStreams(t *testing.T) { // generate and put values to all chans for i, ch := range nodeOpChans { - i := i - ch := ch go func() { for _, tm := range tt.opTimes[i] { op := &pilorama.Move{} @@ -66,7 +65,7 @@ func Test_mergeOperationStreams(t *testing.T) { merged := make(chan *pilorama.Move, 1) min := make(chan uint64) go func() { - min <- mergeOperationStreams(nodeOpChans, merged) + min <- mergeOperationStreams(context.Background(), nodeOpChans, merged) }() var res []uint64 diff --git a/pkg/services/tree/types.pb.go b/pkg/services/tree/types.pb.go deleted file mode 100644 index 6464ccb77..000000000 --- a/pkg/services/tree/types.pb.go +++ /dev/null @@ -1,320 +0,0 @@ -//* -// Auxiliary structures to use with tree service. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc v4.25.0 -// source: pkg/services/tree/types.proto - -package tree - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// KeyValue represents key-value pair attached to an object. -type KeyValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Attribute name. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Attribute value. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *KeyValue) Reset() { - *x = KeyValue{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_types_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *KeyValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*KeyValue) ProtoMessage() {} - -func (x *KeyValue) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_types_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. 
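// Editor's note: the remainder of this protoc-generated file (reflection
// plumbing, raw descriptor bytes, type registration) is deleted wholesale;
// types_frostfs.pb.go below re-declares KeyValue, LogMove and Signature as
// plain structs with hand-written easyproto codecs. The emit side reduces
// to field-number appends, sketched here for KeyValue (a hedged
// illustration, not the generated replacement itself):
func emitKeyValue(mm *easyproto.MessageMarshaler, key string, value []byte) {
	if key != "" {
		mm.AppendString(1, key) // field 1: key (string)
	}
	if len(value) != 0 {
		mm.AppendBytes(2, value) // field 2: value (bytes)
	}
}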
-func (*KeyValue) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{0} -} - -func (x *KeyValue) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *KeyValue) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -// LogMove represents log-entry for a single move operation. -type LogMove struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID of the parent node. - ParentId uint64 `protobuf:"varint,1,opt,name=parent_id,json=parentID,proto3" json:"parent_id,omitempty"` - // Node meta information, including operation timestamp. - Meta []byte `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"` - // ID of the node to move. - ChildId uint64 `protobuf:"varint,3,opt,name=child_id,json=childID,proto3" json:"child_id,omitempty"` -} - -func (x *LogMove) Reset() { - *x = LogMove{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_types_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LogMove) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LogMove) ProtoMessage() {} - -func (x *LogMove) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_types_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LogMove.ProtoReflect.Descriptor instead. -func (*LogMove) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{1} -} - -func (x *LogMove) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} - -func (x *LogMove) GetMeta() []byte { - if x != nil { - return x.Meta - } - return nil -} - -func (x *LogMove) GetChildId() uint64 { - if x != nil { - return x.ChildId - } - return 0 -} - -// Signature of a message. -type Signature struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Serialized public key as defined in FrostFS API. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Signature of a message body. - Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"` -} - -func (x *Signature) Reset() { - *x = Signature{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_services_tree_types_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Signature) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Signature) ProtoMessage() {} - -func (x *Signature) ProtoReflect() protoreflect.Message { - mi := &file_pkg_services_tree_types_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Signature.ProtoReflect.Descriptor instead. 
-func (*Signature) Descriptor() ([]byte, []int) { - return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{2} -} - -func (x *Signature) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *Signature) GetSign() []byte { - if x != nil { - return x.Sign - } - return nil -} - -var File_pkg_services_tree_types_proto protoreflect.FileDescriptor - -var file_pkg_services_tree_types_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, - 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x07, 0x4c, 0x6f, 0x67, - 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x44, - 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x2e, - 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, - 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, - 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_services_tree_types_proto_rawDescOnce sync.Once - file_pkg_services_tree_types_proto_rawDescData = file_pkg_services_tree_types_proto_rawDesc -) - -func file_pkg_services_tree_types_proto_rawDescGZIP() []byte { - file_pkg_services_tree_types_proto_rawDescOnce.Do(func() { - file_pkg_services_tree_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_types_proto_rawDescData) - }) - return file_pkg_services_tree_types_proto_rawDescData -} - -var file_pkg_services_tree_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_pkg_services_tree_types_proto_goTypes = []interface{}{ - (*KeyValue)(nil), // 0: tree.KeyValue - (*LogMove)(nil), // 1: tree.LogMove - (*Signature)(nil), // 2: tree.Signature -} -var file_pkg_services_tree_types_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_pkg_services_tree_types_proto_init() } -func file_pkg_services_tree_types_proto_init() { - if File_pkg_services_tree_types_proto != nil { - return - } - 
if !protoimpl.UnsafeEnabled { - file_pkg_services_tree_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KeyValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LogMove); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_services_tree_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Signature); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_services_tree_types_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pkg_services_tree_types_proto_goTypes, - DependencyIndexes: file_pkg_services_tree_types_proto_depIdxs, - MessageInfos: file_pkg_services_tree_types_proto_msgTypes, - }.Build() - File_pkg_services_tree_types_proto = out.File - file_pkg_services_tree_types_proto_rawDesc = nil - file_pkg_services_tree_types_proto_goTypes = nil - file_pkg_services_tree_types_proto_depIdxs = nil -} diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go index 707fcc3cc..2827b10a9 100644 --- a/pkg/services/tree/types_frostfs.pb.go +++ b/pkg/services/tree/types_frostfs.pb.go @@ -2,7 +2,29 @@ package tree -import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" +import ( + json "encoding/json" + fmt "fmt" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" + easyproto "github.com/VictoriaMetrics/easyproto" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" + strconv "strconv" +) + +type KeyValue struct { + Key string `json:"key"` + Value []byte `json:"value"` +} + +var ( + _ encoding.ProtoMarshaler = (*KeyValue)(nil) + _ encoding.ProtoUnmarshaler = (*KeyValue)(nil) + _ json.Marshaler = (*KeyValue)(nil) + _ json.Unmarshaler = (*KeyValue)(nil) +) // StableSize returns the size of x in protobuf format. // @@ -16,27 +38,176 @@ func (x *KeyValue) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *KeyValue) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.StringMarshal(1, buf[offset:], x.Key) - offset += proto.BytesMarshal(2, buf[offset:], x.Value) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
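// A round-trip usage sketch for the new codec pair (hedged; error handling
// shortened):
kv := &KeyValue{Key: "attr", Value: []byte("v")}
data := kv.MarshalProtobuf(nil) // nil dst: a fresh buffer is allocated
var out KeyValue
if err := out.UnmarshalProtobuf(data); err != nil {
	panic(err)
}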
+func (x *KeyValue) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *KeyValue) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Key) != 0 { + mm.AppendString(1, x.Key) + } + if len(x.Value) != 0 { + mm.AppendBytes(2, x.Value) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *KeyValue) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "KeyValue") + } + switch fc.FieldNum { + case 1: // Key + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Key") + } + x.Key = data + case 2: // Value + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Value") + } + x.Value = data + } + } + return nil +} +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} +func (x *KeyValue) SetKey(v string) { + x.Key = v +} +func (x *KeyValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} +func (x *KeyValue) SetValue(v []byte) { + x.Value = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *KeyValue) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + out.String(x.Key) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"value\":" + out.RawString(prefix) + if x.Value != nil { + out.Base64Bytes(x.Value) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *KeyValue) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + { + var f string + f = in.String() + x.Key = f + } + case "value": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Value = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type LogMove struct { + ParentId uint64 `json:"parentID"` + Meta []byte `json:"meta"` + ChildId uint64 `json:"childID"` +} + +var ( + _ encoding.ProtoMarshaler = (*LogMove)(nil) + _ encoding.ProtoUnmarshaler = (*LogMove)(nil) + _ json.Marshaler = (*LogMove)(nil) + _ json.Unmarshaler = (*LogMove)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -50,28 +221,229 @@ func (x *LogMove) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. 
-// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *LogMove) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.UInt64Marshal(1, buf[offset:], x.ParentId) - offset += proto.BytesMarshal(2, buf[offset:], x.Meta) - offset += proto.UInt64Marshal(3, buf[offset:], x.ChildId) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *LogMove) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst } +func (x *LogMove) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.ParentId != 0 { + mm.AppendUint64(1, x.ParentId) + } + if len(x.Meta) != 0 { + mm.AppendBytes(2, x.Meta) + } + if x.ChildId != 0 { + mm.AppendUint64(3, x.ChildId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *LogMove) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "LogMove") + } + switch fc.FieldNum { + case 1: // ParentId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ParentId") + } + x.ParentId = data + case 2: // Meta + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Meta") + } + x.Meta = data + case 3: // ChildId + data, ok := fc.Uint64() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ChildId") + } + x.ChildId = data + } + } + return nil +} +func (x *LogMove) GetParentId() uint64 { + if x != nil { + return x.ParentId + } + return 0 +} +func (x *LogMove) SetParentId(v uint64) { + x.ParentId = v +} +func (x *LogMove) GetMeta() []byte { + if x != nil { + return x.Meta + } + return nil +} +func (x *LogMove) SetMeta(v []byte) { + x.Meta = v +} +func (x *LogMove) GetChildId() uint64 { + if x != nil { + return x.ChildId + } + return 0 +} +func (x *LogMove) SetChildId(v uint64) { + x.ChildId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *LogMove) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentID\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" + out.RawString(prefix) + if x.Meta != nil { + out.Base64Bytes(x.Meta) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"childID\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ChildId, 10) + out.RawByte('"') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
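// Note: parentID and childID are emitted as quoted decimal strings (see
// MarshalEasyJSON above) so 64-bit values survive JSON consumers that parse
// numbers as IEEE-754 doubles; the lexer side accepts the quoted form back
// through JsonNumber and ParseUint. A hedged round-trip sketch:
lm := LogMove{ParentId: 1, Meta: []byte{0x01}, ChildId: 2}
buf, _ := json.Marshal(&lm) // {"parentID":"1","meta":"AQ==","childID":"2"}
var back LogMove
_ = json.Unmarshal(buf, &back)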
+func (x *LogMove) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "parentID": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.ParentId = f + } + case "meta": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Meta = f + } + case "childID": + { + var f uint64 + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv + x.ChildId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type Signature struct { + Key []byte `json:"key"` + Sign []byte `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*Signature)(nil) + _ encoding.ProtoUnmarshaler = (*Signature)(nil) + _ json.Marshaler = (*Signature)(nil) + _ json.Unmarshaler = (*Signature)(nil) +) + // StableSize returns the size of x in protobuf format. // // Structures with the same field values have the same binary size. @@ -84,23 +456,169 @@ func (x *Signature) StableSize() (size int) { return size } -// StableMarshal marshals x in protobuf binary format with stable field order. -// -// If buffer length is less than x.StableSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same binary format. -func (x *Signature) StableMarshal(buf []byte) []byte { - if x == nil { - return []byte{} - } - if buf == nil { - buf = make([]byte, x.StableSize()) - } - var offset int - offset += proto.BytesMarshal(1, buf[offset:], x.Key) - offset += proto.BytesMarshal(2, buf[offset:], x.Sign) - return buf +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *Signature) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.Key) != 0 { + mm.AppendBytes(1, x.Key) + } + if len(x.Sign) != 0 { + mm.AppendBytes(2, x.Sign) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *Signature) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "Signature") + } + switch fc.FieldNum { + case 1: // Key + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Key") + } + x.Key = data + case 2: // Sign + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Sign") + } + x.Sign = data + } + } + return nil +} +func (x *Signature) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} +func (x *Signature) SetKey(v []byte) { + x.Key = v +} +func (x *Signature) GetSign() []byte { + if x != nil { + return x.Sign + } + return nil +} +func (x *Signature) SetSign(v []byte) { + x.Sign = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *Signature) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + if x.Sign != nil { + out.Base64Bytes(x.Sign) + } else { + out.String("") + } + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *Signature) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Key = f + } + case "signature": + { + var f []byte + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + x.Sign = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } } diff --git a/pkg/services/util/response/service.go b/pkg/services/util/response/service.go index 005a643e5..5152a8ece 100644 --- a/pkg/services/util/response/service.go +++ b/pkg/services/util/response/service.go @@ -1,10 +1,10 @@ package response import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" ) diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go index bce43d6e8..348a45a94 100644 --- a/pkg/services/util/sign.go +++ b/pkg/services/util/sign.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" + 
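// Editorial aside, a hedged sketch that is not part of the diff: the
// Signature codecs above omit empty Key/Sign on the wire (EmitProtobuf
// checks len != 0) and decode base64 JSON strings back into raw bytes,
// so unset fields stay nil across both encodings:
func ExampleSignature_roundTrip() {
	src := &Signature{Key: []byte{0x02, 0xAB}}

	var viaProto Signature
	if err := viaProto.UnmarshalProtobuf(src.MarshalProtobuf(nil)); err != nil {
		panic(err)
	}

	data, err := src.MarshalJSON() // {"key":"Aqs=","signature":""}
	if err != nil {
		panic(err)
	}
	var viaJSON Signature
	if err := viaJSON.UnmarshalJSON(data); err != nil {
		panic(err)
	}

	fmt.Println(viaProto.Sign == nil, viaJSON.Sign == nil)
	// Output: true true
}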
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) diff --git a/internal/ape/converter.go b/pkg/util/ape/converter.go similarity index 99% rename from internal/ape/converter.go rename to pkg/util/ape/converter.go index eb80e7ded..c706cf052 100644 --- a/internal/ape/converter.go +++ b/pkg/util/ape/converter.go @@ -4,7 +4,7 @@ import ( "encoding/hex" "fmt" - v2acl "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" + v2acl "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" diff --git a/internal/ape/converter_test.go b/pkg/util/ape/converter_test.go similarity index 100% rename from internal/ape/converter_test.go rename to pkg/util/ape/converter_test.go diff --git a/cmd/frostfs-cli/modules/util/ape.go b/pkg/util/ape/parser.go similarity index 85% rename from cmd/frostfs-cli/modules/util/ape.go rename to pkg/util/ape/parser.go index 532dc0a50..6f114d45b 100644 --- a/cmd/frostfs-cli/modules/util/ape.go +++ b/pkg/util/ape/parser.go @@ -1,16 +1,14 @@ -package util +package ape import ( "errors" "fmt" "os" - "strconv" "strings" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/flynn-archive/go-shlex" - "github.com/spf13/cobra" ) var ( @@ -27,38 +25,6 @@ var ( errFailedToParseAllAny = errors.New("any/all is not parsed") ) -// PrintHumanReadableAPEChain print APE chain rules. -func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) { - cmd.Println("Chain ID: " + string(chain.ID)) - cmd.Printf(" HEX: %x\n", chain.ID) - cmd.Println("Rules:") - for _, rule := range chain.Rules { - cmd.Println("\n\tStatus: " + rule.Status.String()) - cmd.Println("\tAny: " + strconv.FormatBool(rule.Any)) - cmd.Println("\tConditions:") - for _, c := range rule.Condition { - var ot string - switch c.Kind { - case apechain.KindResource: - ot = "Resource" - case apechain.KindRequest: - ot = "Request" - default: - panic("unknown object type") - } - cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value)) - } - cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted)) - for _, name := range rule.Actions.Names { - cmd.Println("\t\t" + name) - } - cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted)) - for _, name := range rule.Resources.Names { - cmd.Println("\t\t" + name) - } - } -} - func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error { data, err := os.ReadFile(path) if err != nil { @@ -208,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) { case "deny": if !found { return apechain.AccessDenied, nil - } else if strings.EqualFold(expression, "QuotaLimitReached") { - return apechain.QuotaLimitReached, nil - } else { - return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) } + if strings.EqualFold(expression, "QuotaLimitReached") { + return apechain.QuotaLimitReached, nil + } + return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) case "allow": if found { return 0, errUnknownStatusDetail @@ -239,6 +205,8 @@ func parseAction(lexeme string) ([]string, bool, error) { return []string{nativeschema.MethodRangeObject}, true, nil case "object.hash": 
return []string{nativeschema.MethodHashObject}, true, nil + case "object.patch": + return []string{nativeschema.MethodPatchObject}, true, nil case "object.*": return []string{ nativeschema.MethodPutObject, @@ -248,6 +216,7 @@ func parseAction(lexeme string) ([]string, bool, error) { nativeschema.MethodSearchObject, nativeschema.MethodRangeObject, nativeschema.MethodHashObject, + nativeschema.MethodPatchObject, }, true, nil case "container.put": return []string{nativeschema.MethodPutContainer}, false, nil @@ -255,10 +224,6 @@ func parseAction(lexeme string) ([]string, bool, error) { return []string{nativeschema.MethodDeleteContainer}, false, nil case "container.get": return []string{nativeschema.MethodGetContainer}, false, nil - case "container.setcontainereacl": - return []string{nativeschema.MethodSetContainerEACL}, false, nil - case "container.getcontainereacl": - return []string{nativeschema.MethodGetContainerEACL}, false, nil case "container.list": return []string{nativeschema.MethodListContainers}, false, nil case "container.*": @@ -266,8 +231,6 @@ func parseAction(lexeme string) ([]string, bool, error) { nativeschema.MethodPutContainer, nativeschema.MethodDeleteContainer, nativeschema.MethodGetContainer, - nativeschema.MethodSetContainerEACL, - nativeschema.MethodGetContainerEACL, nativeschema.MethodListContainers, }, false, nil default: @@ -298,7 +261,7 @@ func parseResource(lexeme string, isObj bool) (string, error) { } else { if lexeme == "*" { return nativeschema.ResourceFormatAllContainers, nil - } else if lexeme == "/*" { + } else if lexeme == "/*" || lexeme == "root/*" { return nativeschema.ResourceFormatRootContainers, nil } else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 { lexeme = lexeme[1:] diff --git a/cmd/frostfs-cli/modules/util/ape_test.go b/pkg/util/ape/parser_test.go similarity index 96% rename from cmd/frostfs-cli/modules/util/ape_test.go rename to pkg/util/ape/parser_test.go index b275803df..c236c4603 100644 --- a/cmd/frostfs-cli/modules/util/ape_test.go +++ b/pkg/util/ape/parser_test.go @@ -1,4 +1,4 @@ -package util +package ape import ( "fmt" @@ -43,6 +43,15 @@ func TestParseAPERule(t *testing.T) { Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}}, }, }, + { + name: "Valid rule for all containers in explicit root namespace", + rule: "allow Container.Put root/*", + expectRule: policyengine.Rule{ + Status: policyengine.Allow, + Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}}, + Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}}, + }, + }, { name: "Valid rule for all objects in root namespace and container", rule: "allow Object.Put /cid/*", diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go index 547c8d50b..66581878a 100644 --- a/pkg/util/attributes/parser_test.go +++ b/pkg/util/attributes/parser_test.go @@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) { mExp = mSrc } - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { v, ok := mExp[key] require.True(t, ok) require.Equal(t, value, v) delete(mExp, key) - }) + } require.Empty(t, mExp) } diff --git a/pkg/util/config/test/generate.go b/pkg/util/config/test/generate.go new file mode 100644 index 000000000..63e286615 --- /dev/null +++ b/pkg/util/config/test/generate.go @@ -0,0 +1,58 @@ +package configtest + +import ( + "crypto/rand" + "os" + "path" + "testing" + + 
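// Editorial aside, a hedged sketch that is not part of the diff: the
// attributes test change above swaps callback-style node.IterateAttributes
// for Go 1.23 range-over-func iteration. Assuming the sdk-go Attributes()
// accessor returns an iter.Seq2[string, string] (import "iter"), an
// iterator with the shape the test ranges over looks like this:
func attributesSeq(attrs [][2]string) iter.Seq2[string, string] {
	return func(yield func(string, string) bool) {
		for _, kv := range attrs {
			// Stop early when the range body breaks out of the loop.
			if !yield(kv[0], kv[1]) {
				return
			}
		}
	}
}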
"github.com/stretchr/testify/require" +) + +type MarshalFunc = func(any) ([]byte, error) + +type ConfigFile struct { + filename string + content map[string]any + marshal func(any) ([]byte, error) +} + +type DummyFile struct { + filename string + size int +} + +func NewConfigFile(filename string, content map[string]any, marshal MarshalFunc) ConfigFile { + return ConfigFile{ + filename: filename, + content: content, + marshal: marshal, + } +} + +func NewDummyFile(filename string, size int) DummyFile { + return DummyFile{ + filename: filename, + size: size, + } +} + +func PrepareConfigFiles(t *testing.T, dir string, files []ConfigFile) { + for _, file := range files { + data, err := file.marshal(file.content) + require.NoError(t, err) + + err = os.WriteFile(path.Join(dir, file.filename), data, 0o600) + require.NoError(t, err) + } +} + +func PrepareDummyFiles(t *testing.T, dir string, files []DummyFile) { + for _, file := range files { + data := make([]byte, file.size) + _, _ = rand.Read(data) + + err := os.WriteFile(path.Join(dir, file.filename), data, 0o600) + require.NoError(t, err) + } +} diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go index a9877e007..8569ec734 100644 --- a/pkg/util/http/calls.go +++ b/pkg/util/http/calls.go @@ -32,8 +32,8 @@ func (x *Server) Serve() error { // // Once Shutdown has been called on a server, it may not be reused; // future calls to Serve method will have no effect. -func (x *Server) Shutdown() error { - ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout) +func (x *Server) Shutdown(ctx context.Context) error { + ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout) err := x.srv.Shutdown(ctx) diff --git a/pkg/util/http/pprof.go b/pkg/util/http/pprof.go index 7a0413000..f85fd2ea9 100644 --- a/pkg/util/http/pprof.go +++ b/pkg/util/http/pprof.go @@ -3,8 +3,14 @@ package httputil import ( "net/http" "net/http/pprof" + + "github.com/felixge/fgprof" ) +func init() { + http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler()) +} + // initializes pprof package in order to // register Prometheus handlers on http.DefaultServeMux. 
var _ = pprof.Handler("") diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go index 923412a7f..2589ab786 100644 --- a/pkg/util/http/server.go +++ b/pkg/util/http/server.go @@ -76,8 +76,7 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server { o(c) } - switch { - case c.shutdownTimeout <= 0: + if c.shutdownTimeout <= 0 { panicOnOptValue("shutdown timeout", c.shutdownTimeout) } diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go index b2942b52a..6337039a9 100644 --- a/pkg/util/keyer/dashboard.go +++ b/pkg/util/keyer/dashboard.go @@ -6,6 +6,7 @@ import ( "os" "text/tabwriter" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -104,9 +105,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) { func base58ToHex(data string) string { val, err := base58.Decode(data) - if err != nil { - panic("produced incorrect base58 value") - } + assert.NoError(err, "produced incorrect base58 value") return hex.EncodeToString(val) } diff --git a/pkg/util/locode/column/coordinates.go b/pkg/util/locode/column/coordinates.go deleted file mode 100644 index 5e32c016e..000000000 --- a/pkg/util/locode/column/coordinates.go +++ /dev/null @@ -1,193 +0,0 @@ -package locodecolumn - -import ( - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" -) - -const ( - minutesDigits = 2 - hemisphereSymbols = 1 -) - -const ( - latDegDigits = 2 - lngDegDigits = 3 -) - -type coordinateCode struct { - degDigits int - value []uint8 -} - -// LongitudeCode represents the value of the longitude -// of the location conforming to UN/LOCODE specification. -type LongitudeCode coordinateCode - -// LongitudeHemisphere represents the hemisphere of the earth -// // along the Greenwich meridian. -type LongitudeHemisphere [hemisphereSymbols]uint8 - -// LatitudeCode represents the value of the latitude -// of the location conforming to UN/LOCODE specification. -type LatitudeCode coordinateCode - -// LatitudeHemisphere represents the hemisphere of the earth -// along the equator. -type LatitudeHemisphere [hemisphereSymbols]uint8 - -func coordinateFromString(s string, degDigits int, hemisphereAlphabet []uint8) (*coordinateCode, error) { - if len(s) != degDigits+minutesDigits+hemisphereSymbols { - return nil, locode.ErrInvalidString - } - - for i := range s[:degDigits+minutesDigits] { - if !isDigit(s[i]) { - return nil, locode.ErrInvalidString - } - } - -loop: - for _, sym := range s[degDigits+minutesDigits:] { - for j := range hemisphereAlphabet { - if hemisphereAlphabet[j] == uint8(sym) { - continue loop - } - } - - return nil, locode.ErrInvalidString - } - - return &coordinateCode{ - degDigits: degDigits, - value: []uint8(s), - }, nil -} - -// LongitudeFromString parses a string and returns the location's longitude. -func LongitudeFromString(s string) (*LongitudeCode, error) { - cc, err := coordinateFromString(s, lngDegDigits, []uint8{'W', 'E'}) - if err != nil { - return nil, err - } - - return (*LongitudeCode)(cc), nil -} - -// LatitudeFromString parses a string and returns the location's latitude. -func LatitudeFromString(s string) (*LatitudeCode, error) { - cc, err := coordinateFromString(s, latDegDigits, []uint8{'N', 'S'}) - if err != nil { - return nil, err - } - - return (*LatitudeCode)(cc), nil -} - -func (cc *coordinateCode) degrees() []uint8 { - return cc.value[:cc.degDigits] -} - -// Degrees returns the longitude's degrees. 
-func (lc *LongitudeCode) Degrees() (l [lngDegDigits]uint8) { - copy(l[:], (*coordinateCode)(lc).degrees()) - return -} - -// Degrees returns the latitude's degrees. -func (lc *LatitudeCode) Degrees() (l [latDegDigits]uint8) { - copy(l[:], (*coordinateCode)(lc).degrees()) - return -} - -func (cc *coordinateCode) minutes() (mnt [minutesDigits]uint8) { - for i := 0; i < minutesDigits; i++ { - mnt[i] = cc.value[cc.degDigits+i] - } - - return -} - -// Minutes returns the longitude's minutes. -func (lc *LongitudeCode) Minutes() [minutesDigits]uint8 { - return (*coordinateCode)(lc).minutes() -} - -// Minutes returns the latitude's minutes. -func (lc *LatitudeCode) Minutes() [minutesDigits]uint8 { - return (*coordinateCode)(lc).minutes() -} - -// Hemisphere returns the longitude's hemisphere code. -func (lc *LongitudeCode) Hemisphere() LongitudeHemisphere { - return (*coordinateCode)(lc).hemisphere() -} - -// Hemisphere returns the latitude's hemisphere code. -func (lc *LatitudeCode) Hemisphere() LatitudeHemisphere { - return (*coordinateCode)(lc).hemisphere() -} - -func (cc *coordinateCode) hemisphere() (h [hemisphereSymbols]uint8) { - for i := 0; i < hemisphereSymbols; i++ { - h[i] = cc.value[cc.degDigits+minutesDigits+i] - } - - return h -} - -// North returns true for the northern hemisphere. -func (h LatitudeHemisphere) North() bool { - return h[0] == 'N' -} - -// East returns true for the eastern hemisphere. -func (h LongitudeHemisphere) East() bool { - return h[0] == 'E' -} - -// Coordinates represents the coordinates of the location from UN/LOCODE table. -type Coordinates struct { - lat *LatitudeCode - - lng *LongitudeCode -} - -// Latitude returns the location's latitude. -func (c *Coordinates) Latitude() *LatitudeCode { - return c.lat -} - -// Longitude returns the location's longitude. -func (c *Coordinates) Longitude() *LongitudeCode { - return c.lng -} - -// CoordinatesFromString parses a string and returns the location's coordinates. -func CoordinatesFromString(s string) (*Coordinates, error) { - if len(s) == 0 { - return nil, nil - } - - strs := strings.Split(s, " ") - if len(strs) != 2 { - return nil, locode.ErrInvalidString - } - - lat, err := LatitudeFromString(strs[0]) - if err != nil { - return nil, fmt.Errorf("could not parse latitude: %w", err) - } - - lng, err := LongitudeFromString(strs[1]) - if err != nil { - return nil, fmt.Errorf("could not parse longitude: %w", err) - } - - return &Coordinates{ - lat: lat, - lng: lng, - }, nil -} diff --git a/pkg/util/locode/column/country.go b/pkg/util/locode/column/country.go deleted file mode 100644 index 7b29a97c5..000000000 --- a/pkg/util/locode/column/country.go +++ /dev/null @@ -1,38 +0,0 @@ -package locodecolumn - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" -) - -const countryCodeLen = 2 - -// CountryCode represents ISO 3166 alpha-2 Country Code. -type CountryCode [countryCodeLen]uint8 - -// Symbols returns digits of the country code. -func (cc *CountryCode) Symbols() [countryCodeLen]uint8 { - return *cc -} - -// CountryCodeFromString parses a string and returns the country code. 
-func CountryCodeFromString(s string) (*CountryCode, error) { - if l := len(s); l != countryCodeLen { - return nil, fmt.Errorf("incorrect country code length: expect: %d, got: %d", - countryCodeLen, - l, - ) - } - - for i := range s { - if !isUpperAlpha(s[i]) { - return nil, locode.ErrInvalidString - } - } - - cc := CountryCode{} - copy(cc[:], s) - - return &cc, nil -} diff --git a/pkg/util/locode/column/location.go b/pkg/util/locode/column/location.go deleted file mode 100644 index 4303228fb..000000000 --- a/pkg/util/locode/column/location.go +++ /dev/null @@ -1,38 +0,0 @@ -package locodecolumn - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" -) - -const locationCodeLen = 3 - -// LocationCode represents 3-character code for the location. -type LocationCode [locationCodeLen]uint8 - -// Symbols returns characters of the location code. -func (lc *LocationCode) Symbols() [locationCodeLen]uint8 { - return *lc -} - -// LocationCodeFromString parses a string and returns the location code. -func LocationCodeFromString(s string) (*LocationCode, error) { - if l := len(s); l != locationCodeLen { - return nil, fmt.Errorf("incorrect location code length: expect: %d, got: %d", - locationCodeLen, - l, - ) - } - - for i := range s { - if !isUpperAlpha(s[i]) && !isDigit(s[i]) { - return nil, locode.ErrInvalidString - } - } - - lc := LocationCode{} - copy(lc[:], s) - - return &lc, nil -} diff --git a/pkg/util/locode/column/util.go b/pkg/util/locode/column/util.go deleted file mode 100644 index 8da1f9a25..000000000 --- a/pkg/util/locode/column/util.go +++ /dev/null @@ -1,9 +0,0 @@ -package locodecolumn - -func isDigit(sym uint8) bool { - return sym >= '0' && sym <= '9' -} - -func isUpperAlpha(sym uint8) bool { - return sym >= 'A' && sym <= 'Z' -} diff --git a/pkg/util/locode/db/airports/calls.go b/pkg/util/locode/db/airports/calls.go deleted file mode 100644 index dac8cce8b..000000000 --- a/pkg/util/locode/db/airports/calls.go +++ /dev/null @@ -1,194 +0,0 @@ -package airportsdb - -import ( - "encoding/csv" - "errors" - "fmt" - "io" - "os" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" -) - -const ( - _ = iota - 1 - - _ // Airport ID - _ // Name - airportCity - airportCountry - airportIATA - _ // ICAO - airportLatitude - airportLongitude - _ // Altitude - _ // Timezone - _ // DST - _ // Tz database time zone - _ // Type - _ // Source - - airportFldNum -) - -type record struct { - city, - country, - iata, - lat, - lng string -} - -// Get scans the records of the OpenFlights Airport to an in-memory table (once), -// and returns an entry that matches the passed UN/LOCODE record. -// -// Records are matched if they have the same country code and either -// same IATA code or same city name (location name in UN/LOCODE). -// -// Returns locodedb.ErrAirportNotFound if no entry matches. 
-func (db *DB) Get(locodeRecord locode.Record) (*locodedb.AirportRecord, error) { - if err := db.initAirports(); err != nil { - return nil, err - } - - records := db.mAirports[locodeRecord.LOCODE.CountryCode()] - - for i := range records { - if locodeRecord.LOCODE.LocationCode() != records[i].iata && - locodeRecord.NameWoDiacritics != records[i].city { - continue - } - - lat, err := strconv.ParseFloat(records[i].lat, 64) - if err != nil { - return nil, err - } - - lng, err := strconv.ParseFloat(records[i].lng, 64) - if err != nil { - return nil, err - } - - return &locodedb.AirportRecord{ - CountryName: records[i].country, - Point: locodedb.NewPoint(lat, lng), - }, nil - } - - return nil, locodedb.ErrAirportNotFound -} - -const ( - _ = iota - 1 - - countryName - countryISOCode - _ // dafif_code - - countryFldNum -) - -// CountryName scans the records of the OpenFlights Country table to an in-memory table (once), -// and returns the name of the country by code. -// -// Returns locodedb.ErrCountryNotFound if no entry matches. -func (db *DB) CountryName(code *locodedb.CountryCode) (name string, err error) { - if err = db.initCountries(); err != nil { - return - } - - argCode := code.String() - - for cName, cCode := range db.mCountries { - if cCode == argCode { - name = cName - break - } - } - - if name == "" { - err = locodedb.ErrCountryNotFound - } - - return -} - -func (db *DB) initAirports() (err error) { - db.airportsOnce.Do(func() { - db.mAirports = make(map[string][]record) - - if err = db.initCountries(); err != nil { - return - } - - err = db.scanWords(db.airports, airportFldNum, func(words []string) error { - countryCode := db.mCountries[words[airportCountry]] - if countryCode != "" { - db.mAirports[countryCode] = append(db.mAirports[countryCode], record{ - city: words[airportCity], - country: words[airportCountry], - iata: words[airportIATA], - lat: words[airportLatitude], - lng: words[airportLongitude], - }) - } - - return nil - }) - }) - - return -} - -func (db *DB) initCountries() (err error) { - db.countriesOnce.Do(func() { - db.mCountries = make(map[string]string) - - err = db.scanWords(db.countries, countryFldNum, func(words []string) error { - db.mCountries[words[countryName]] = words[countryISOCode] - - return nil - }) - }) - - return -} - -var errScanInt = errors.New("interrupt scan") - -func (db *DB) scanWords(pm pathMode, num int, wordsHandler func([]string) error) error { - tableFile, err := os.OpenFile(pm.path, os.O_RDONLY, pm.mode) - if err != nil { - return err - } - - defer tableFile.Close() - - r := csv.NewReader(tableFile) - r.ReuseRecord = true - - for { - words, err := r.Read() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - - return err - } else if ln := len(words); ln != num { - return fmt.Errorf("unexpected number of words %d", ln) - } - - if err := wordsHandler(words); err != nil { - if errors.Is(err, errScanInt) { - break - } - - return err - } - } - - return nil -} diff --git a/pkg/util/locode/db/airports/db.go b/pkg/util/locode/db/airports/db.go deleted file mode 100644 index acfa3fd60..000000000 --- a/pkg/util/locode/db/airports/db.go +++ /dev/null @@ -1,83 +0,0 @@ -package airportsdb - -import ( - "fmt" - "io/fs" - "sync" -) - -// Prm groups the required parameters of the DB's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). 
-type Prm struct { - // Path to OpenFlights Airport csv table. - // - // Must not be empty. - AirportsPath string - - // Path to OpenFlights Countries csv table. - // - // Must not be empty. - CountriesPath string -} - -// DB is a descriptor of the OpenFlights database in csv format. -// -// For correct operation, DB must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// The DB is immediately ready to work through API. -type DB struct { - airports, countries pathMode - - airportsOnce, countriesOnce sync.Once - - mCountries map[string]string - - mAirports map[string][]record -} - -type pathMode struct { - path string - mode fs.FileMode -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the DB. -// -// Panics if at least one value of the parameters is invalid. -// -// The created DB does not require additional -// initialization and is completely ready for work. -func New(prm Prm, opts ...Option) *DB { - switch { - case prm.AirportsPath == "": - panicOnPrmValue("AirportsPath", prm.AirportsPath) - case prm.CountriesPath == "": - panicOnPrmValue("CountriesPath", prm.CountriesPath) - } - - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - return &DB{ - airports: pathMode{ - path: prm.AirportsPath, - mode: o.airportMode, - }, - countries: pathMode{ - path: prm.CountriesPath, - mode: o.countryMode, - }, - } -} diff --git a/pkg/util/locode/db/airports/opts.go b/pkg/util/locode/db/airports/opts.go deleted file mode 100644 index 3799d9e27..000000000 --- a/pkg/util/locode/db/airports/opts.go +++ /dev/null @@ -1,19 +0,0 @@ -package airportsdb - -import ( - "io/fs" -) - -// Option sets an optional parameter of DB. -type Option func(*options) - -type options struct { - airportMode, countryMode fs.FileMode -} - -func defaultOpts() *options { - return &options{ - airportMode: fs.ModePerm, // 0777 - countryMode: fs.ModePerm, // 0777 - } -} diff --git a/pkg/util/locode/db/boltdb/calls.go b/pkg/util/locode/db/boltdb/calls.go deleted file mode 100644 index 6a80def3a..000000000 --- a/pkg/util/locode/db/boltdb/calls.go +++ /dev/null @@ -1,166 +0,0 @@ -package locodebolt - -import ( - "encoding/json" - "errors" - "fmt" - "path/filepath" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" - "go.etcd.io/bbolt" -) - -// Open opens an underlying BoltDB instance. -// -// Timeout of BoltDB opening is 3s (only for Linux or Darwin). -// -// Opens BoltDB in read-only mode if DB is read-only. -func (db *DB) Open() error { - // copy-paste from metabase: - // consider universal Open/Close for BoltDB wrappers - - err := util.MkdirAllX(filepath.Dir(db.path), db.mode) - if err != nil { - return fmt.Errorf("could not create dir for BoltDB: %w", err) - } - - db.bolt, err = bbolt.Open(db.path, db.mode, db.boltOpts) - if err != nil { - return fmt.Errorf("could not open BoltDB: %w", err) - } - - return nil -} - -// Close closes an underlying BoltDB instance. -// -// Must not be called before successful Open call. 
-func (db *DB) Close() error { - return db.bolt.Close() -} - -func countryBucketKey(cc *locodedb.CountryCode) ([]byte, error) { - return []byte(cc.String()), nil -} - -func locationBucketKey(lc *locodedb.LocationCode) ([]byte, error) { - return []byte(lc.String()), nil -} - -type recordJSON struct { - CountryName string - LocationName string - SubDivName string - SubDivCode string - Latitude float64 - Longitude float64 - Continent string -} - -func recordValue(r locodedb.Record) ([]byte, error) { - p := r.GeoPoint() - - rj := &recordJSON{ - CountryName: r.CountryName(), - LocationName: r.LocationName(), - SubDivName: r.SubDivName(), - SubDivCode: r.SubDivCode(), - Latitude: p.Latitude(), - Longitude: p.Longitude(), - Continent: r.Continent().String(), - } - - return json.Marshal(rj) -} - -func recordFromValue(data []byte) (*locodedb.Record, error) { - rj := new(recordJSON) - - if err := json.Unmarshal(data, rj); err != nil { - return nil, err - } - - r := new(locodedb.Record) - r.SetCountryName(rj.CountryName) - r.SetLocationName(rj.LocationName) - r.SetSubDivName(rj.SubDivName) - r.SetSubDivCode(rj.SubDivCode) - r.SetGeoPoint(locodedb.NewPoint(rj.Latitude, rj.Longitude)) - - cont := locodedb.ContinentFromString(rj.Continent) - r.SetContinent(&cont) - - return r, nil -} - -// Put saves the record by key in an underlying BoltDB instance. -// -// Country code from the key is used for allocating the 1st level buckets. -// Records are stored in country buckets by the location code from the key. -// The records are stored in internal binary JSON format. -// -// Must not be called before successful Open call. -// Must not be called in read-only mode: behavior is undefined. -func (db *DB) Put(key locodedb.Key, rec locodedb.Record) error { - return db.bolt.Batch(func(tx *bbolt.Tx) error { - countryKey, err := countryBucketKey(key.CountryCode()) - if err != nil { - return err - } - - bktCountry, err := tx.CreateBucketIfNotExists(countryKey) - if err != nil { - return fmt.Errorf("could not create country bucket: %w", err) - } - - locationKey, err := locationBucketKey(key.LocationCode()) - if err != nil { - return err - } - - cont, err := recordValue(rec) - if err != nil { - return err - } - - return bktCountry.Put(locationKey, cont) - }) -} - -var errRecordNotFound = errors.New("record not found") - -// Get reads the record by key from underlying BoltDB instance. -// -// Returns an error if no record is presented by key in DB. -// -// Must not be called before successful Open call. -func (db *DB) Get(key locodedb.Key) (rec *locodedb.Record, err error) { - err = db.bolt.View(func(tx *bbolt.Tx) error { - countryKey, err := countryBucketKey(key.CountryCode()) - if err != nil { - return err - } - - bktCountry := tx.Bucket(countryKey) - if bktCountry == nil { - return errRecordNotFound - } - - locationKey, err := locationBucketKey(key.LocationCode()) - if err != nil { - return err - } - - data := bktCountry.Get(locationKey) - if data == nil { - return errRecordNotFound - } - - rec, err = recordFromValue(data) - - return err - }) - - return -} diff --git a/pkg/util/locode/db/boltdb/db.go b/pkg/util/locode/db/boltdb/db.go deleted file mode 100644 index 3d09a797d..000000000 --- a/pkg/util/locode/db/boltdb/db.go +++ /dev/null @@ -1,73 +0,0 @@ -package locodebolt - -import ( - "fmt" - "io/fs" - - "go.etcd.io/bbolt" -) - -// Prm groups the required parameters of the DB's constructor. -// -// All values must comply with the requirements imposed on them. 
-// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Path to BoltDB file with FrostFS location database. - // - // Must not be empty. - Path string -} - -// DB is a descriptor of the FrostFS BoltDB location database. -// -// For correct operation, DB must be created -// using the constructor (New) based on the required parameters -// and optional components. -// -// After successful creation, -// DB must be opened through Open call. After successful opening, -// DB is ready to work through API (until Close call). -// -// Upon completion of work with the DB, it must be closed -// by Close method. -type DB struct { - path string - - mode fs.FileMode - - boltOpts *bbolt.Options - - bolt *bbolt.DB -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the DB. -// -// Panics if at least one value of the parameters is invalid. -// -// The created DB requires calling the Open method in order -// to initialize required resources. -func New(prm Prm, opts ...Option) *DB { - switch { - case prm.Path == "": - panicOnPrmValue("Path", prm.Path) - } - - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - return &DB{ - path: prm.Path, - mode: o.mode, - boltOpts: o.boltOpts, - } -} diff --git a/pkg/util/locode/db/boltdb/opts.go b/pkg/util/locode/db/boltdb/opts.go deleted file mode 100644 index db0cccd3a..000000000 --- a/pkg/util/locode/db/boltdb/opts.go +++ /dev/null @@ -1,37 +0,0 @@ -package locodebolt - -import ( - "io/fs" - "os" - "time" - - "go.etcd.io/bbolt" -) - -// Option sets an optional parameter of DB. -type Option func(*options) - -type options struct { - mode fs.FileMode - - boltOpts *bbolt.Options -} - -func defaultOpts() *options { - return &options{ - mode: os.ModePerm, // 0777 - boltOpts: &bbolt.Options{ - Timeout: 3 * time.Second, - }, - } -} - -// ReadOnly enables read-only mode of the DB. -// -// Do not call DB.Put method on instances with -// this option: the behavior is undefined. -func ReadOnly() Option { - return func(o *options) { - o.boltOpts.ReadOnly = true - } -} diff --git a/pkg/util/locode/db/continent.go b/pkg/util/locode/db/continent.go deleted file mode 100644 index 863af7b57..000000000 --- a/pkg/util/locode/db/continent.go +++ /dev/null @@ -1,81 +0,0 @@ -package locodedb - -// Continent is an enumeration of Earth's continent. -type Continent uint8 - -const ( - // ContinentUnknown is an undefined Continent value. - ContinentUnknown = iota - - // ContinentEurope corresponds to Europe. - ContinentEurope - - // ContinentAfrica corresponds to Africa. - ContinentAfrica - - // ContinentNorthAmerica corresponds to North America. - ContinentNorthAmerica - - // ContinentSouthAmerica corresponds to South America. - ContinentSouthAmerica - - // ContinentAsia corresponds to Asia. - ContinentAsia - - // ContinentAntarctica corresponds to Antarctica. - ContinentAntarctica - - // ContinentOceania corresponds to Oceania. - ContinentOceania -) - -// Is checks if c is the same continent as c2. 
-func (c *Continent) Is(c2 Continent) bool { - return *c == c2 -} - -func (c Continent) String() string { - switch c { - case ContinentUnknown: - fallthrough - default: - return "Unknown" - case ContinentEurope: - return "Europe" - case ContinentAfrica: - return "Africa" - case ContinentNorthAmerica: - return "North America" - case ContinentSouthAmerica: - return "South America" - case ContinentAsia: - return "Asia" - case ContinentAntarctica: - return "Antarctica" - case ContinentOceania: - return "Oceania" - } -} - -// ContinentFromString returns Continent value -// corresponding to the passed string representation. -func ContinentFromString(str string) Continent { - switch str { - default: - return ContinentUnknown - case "Europe": - return ContinentEurope - case "Africa": - return ContinentAfrica - case "North America": - return ContinentNorthAmerica - case "South America": - return ContinentSouthAmerica - case "Asia": - return ContinentAsia - case "Antarctica": - return ContinentAntarctica - case "Oceania": - return ContinentOceania - } -} diff --git a/pkg/util/locode/db/continents/geojson/calls.go b/pkg/util/locode/db/continents/geojson/calls.go deleted file mode 100644 index 34467d5a2..000000000 --- a/pkg/util/locode/db/continents/geojson/calls.go +++ /dev/null @@ -1,98 +0,0 @@ -package continentsdb - -import ( - "fmt" - "os" - - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" - "github.com/paulmach/orb" - "github.com/paulmach/orb/geojson" - "github.com/paulmach/orb/planar" -) - -const continentProperty = "Continent" - -// PointContinent goes through all polygons and returns the continent -// in which the point is located. -// -// Returns locodedb.ContinentUnknown if no entry matches. -// -// All GeoJSON feature are parsed from file once and stored in memory. 
-func (db *DB) PointContinent(point *locodedb.Point) (*locodedb.Continent, error) { - var err error - - db.once.Do(func() { - err = db.init() - }) - - if err != nil { - return nil, err - } - - planarPoint := orb.Point{point.Longitude(), point.Latitude()} - - var ( - continent string - minDst float64 - ) - - for _, feature := range db.features { - if multiPolygon, ok := feature.Geometry.(orb.MultiPolygon); ok { - if planar.MultiPolygonContains(multiPolygon, planarPoint) { - continent = feature.Properties.MustString(continentProperty) - break - } - } else if polygon, ok := feature.Geometry.(orb.Polygon); ok { - if planar.PolygonContains(polygon, planarPoint) { - continent = feature.Properties.MustString(continentProperty) - break - } - } - distance := planar.DistanceFrom(feature.Geometry, planarPoint) - if minDst == 0 || minDst > distance { - minDst = distance - continent = feature.Properties.MustString(continentProperty) - } - } - - c := continentFromString(continent) - - return &c, nil -} - -func (db *DB) init() error { - data, err := os.ReadFile(db.path) - if err != nil { - return fmt.Errorf("could not read data file: %w", err) - } - - features, err := geojson.UnmarshalFeatureCollection(data) - if err != nil { - return fmt.Errorf("could not unmarshal GeoJSON feature collection: %w", err) - } - - db.features = features.Features - - return nil -} - -func continentFromString(c string) locodedb.Continent { - switch c { - default: - return locodedb.ContinentUnknown - case "Africa": - return locodedb.ContinentAfrica - case "Asia": - return locodedb.ContinentAsia - case "Europe": - return locodedb.ContinentEurope - case "North America": - return locodedb.ContinentNorthAmerica - case "South America": - return locodedb.ContinentSouthAmerica - case "Antarctica": - return locodedb.ContinentAntarctica - case "Australia", "Oceania": - return locodedb.ContinentOceania - } -} diff --git a/pkg/util/locode/db/continents/geojson/db.go b/pkg/util/locode/db/continents/geojson/db.go deleted file mode 100644 index ee43bd810..000000000 --- a/pkg/util/locode/db/continents/geojson/db.go +++ /dev/null @@ -1,63 +0,0 @@ -package continentsdb - -import ( - "fmt" - "sync" - - "github.com/paulmach/orb/geojson" -) - -// Prm groups the required parameters of the DB's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Path to polygons of Earth's continents in GeoJSON format. - // - // Must not be empty. - Path string -} - -// DB is a descriptor of the Earth's polygons in GeoJSON format. -// -// For correct operation, DB must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// The DB is immediately ready to work through API. -type DB struct { - path string - - once sync.Once - - features []*geojson.Feature -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the DB. -// -// Panics if at least one value of the parameters is invalid. -// -// The created DB does not require additional -// initialization and is completely ready for work. 
-func New(prm Prm, opts ...Option) *DB { - switch { - case prm.Path == "": - panicOnPrmValue("Path", prm.Path) - } - - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - return &DB{ - path: prm.Path, - } -} diff --git a/pkg/util/locode/db/continents/geojson/opts.go b/pkg/util/locode/db/continents/geojson/opts.go deleted file mode 100644 index 59831fcc5..000000000 --- a/pkg/util/locode/db/continents/geojson/opts.go +++ /dev/null @@ -1,10 +0,0 @@ -package continentsdb - -// Option sets an optional parameter of DB. -type Option func(*options) - -type options struct{} - -func defaultOpts() *options { - return &options{} -} diff --git a/pkg/util/locode/db/country.go b/pkg/util/locode/db/country.go deleted file mode 100644 index 2d13c6ef9..000000000 --- a/pkg/util/locode/db/country.go +++ /dev/null @@ -1,32 +0,0 @@ -package locodedb - -import ( - "fmt" - - locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column" -) - -// CountryCode represents a country code for -// the storage in the FrostFS location database. -type CountryCode locodecolumn.CountryCode - -// CountryCodeFromString parses a string UN/LOCODE country code -// and returns a CountryCode. -func CountryCodeFromString(s string) (*CountryCode, error) { - cc, err := locodecolumn.CountryCodeFromString(s) - if err != nil { - return nil, fmt.Errorf("could not parse country code: %w", err) - } - - return CountryFromColumn(cc) -} - -// CountryFromColumn converts a UN/LOCODE country code to a CountryCode. -func CountryFromColumn(cc *locodecolumn.CountryCode) (*CountryCode, error) { - return (*CountryCode)(cc), nil -} - -func (c *CountryCode) String() string { - syms := (*locodecolumn.CountryCode)(c).Symbols() - return string(syms[:]) -} diff --git a/pkg/util/locode/db/db.go b/pkg/util/locode/db/db.go deleted file mode 100644 index 8c71ea794..000000000 --- a/pkg/util/locode/db/db.go +++ /dev/null @@ -1,183 +0,0 @@ -package locodedb - -import ( - "errors" - "fmt" - "runtime" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" - "golang.org/x/sync/errgroup" -) - -// SourceTable is an interface of the UN/LOCODE table. -type SourceTable interface { - // Must iterate over all entries of the table - // and pass next entry to the handler. - // - // Must return handler's errors directly. - IterateAll(func(locode.Record) error) error -} - -// DB is an interface of FrostFS location database. -type DB interface { - // Must save the record by key in the database. - Put(Key, Record) error - - // Must return the record by key from the database. - Get(Key) (*Record, error) -} - -// AirportRecord represents the entry in FrostFS airport database. -type AirportRecord struct { - // Name of the country where airport is located. - CountryName string - - // Geo point where airport is located. - Point *Point -} - -// ErrAirportNotFound is returned by AirportRecord readers -// when the required airport is not found. -var ErrAirportNotFound = errors.New("airport not found") - -// AirportDB is an interface of FrostFS airport database. -type AirportDB interface { - // Must return the record by UN/LOCODE table record. - // - // Must return ErrAirportNotFound if there is no - // related airport in the database. - Get(locode.Record) (*AirportRecord, error) -} - -// ContinentsDB is an interface of FrostFS continent database. -type ContinentsDB interface { - // Must return continent of the geo point. 
- PointContinent(*Point) (*Continent, error) -} - -var ErrSubDivNotFound = errors.New("subdivision not found") - -var ErrCountryNotFound = errors.New("country not found") - -// NamesDB is an interface of the FrostFS location namespace. -type NamesDB interface { - // Must resolve a country code to a country name. - // - // Must return ErrCountryNotFound if there is no - // country with the provided code. - CountryName(*CountryCode) (string, error) - - // Must resolve (country code, subdivision code) to - // a subdivision name. - // - // Must return ErrSubDivNotFound if either country or - // subdivision is not presented in database. - SubDivName(*CountryCode, string) (string, error) -} - -// FillDatabase generates the FrostFS location database based on the UN/LOCODE table. -func FillDatabase(table SourceTable, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error { - var errG errgroup.Group - - // Pick some sane default, after this the performance stopped increasing. - errG.SetLimit(runtime.NumCPU() * 4) - _ = table.IterateAll(func(tableRecord locode.Record) error { - errG.Go(func() error { - return processTableRecord(tableRecord, airports, continents, names, db) - }) - return nil - }) - return errG.Wait() -} - -func processTableRecord(tableRecord locode.Record, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error { - if tableRecord.LOCODE.LocationCode() == "" { - return nil - } - - dbKey, err := NewKey(tableRecord.LOCODE) - if err != nil { - return err - } - - dbRecord, err := NewRecord(tableRecord) - if err != nil { - if errors.Is(err, errParseCoordinates) { - return nil - } - - return err - } - - geoPoint := dbRecord.GeoPoint() - countryName := "" - - if geoPoint == nil { - airportRecord, err := airports.Get(tableRecord) - if err != nil { - if errors.Is(err, ErrAirportNotFound) { - return nil - } - - return err - } - - geoPoint = airportRecord.Point - countryName = airportRecord.CountryName - } - - dbRecord.SetGeoPoint(geoPoint) - - if countryName == "" { - countryName, err = names.CountryName(dbKey.CountryCode()) - if err != nil { - if errors.Is(err, ErrCountryNotFound) { - return nil - } - - return err - } - } - - dbRecord.SetCountryName(countryName) - - if subDivCode := dbRecord.SubDivCode(); subDivCode != "" { - subDivName, err := names.SubDivName(dbKey.CountryCode(), subDivCode) - if err != nil { - if errors.Is(err, ErrSubDivNotFound) { - return nil - } - - return err - } - - dbRecord.SetSubDivName(subDivName) - } - - continent, err := continents.PointContinent(geoPoint) - if err != nil { - return fmt.Errorf("could not calculate continent geo point: %w", err) - } else if continent.Is(ContinentUnknown) { - return nil - } - - dbRecord.SetContinent(continent) - - return db.Put(*dbKey, *dbRecord) -} - -// LocodeRecord returns the record from the FrostFS location database -// corresponding to the string representation of UN/LOCODE. 
-func LocodeRecord(db DB, sLocode string) (*Record, error) { - lc, err := locode.FromString(sLocode) - if err != nil { - return nil, fmt.Errorf("could not parse locode: %w", err) - } - - key, err := NewKey(*lc) - if err != nil { - return nil, err - } - - return db.Get(*key) -} diff --git a/pkg/util/locode/db/location.go b/pkg/util/locode/db/location.go deleted file mode 100644 index d22979170..000000000 --- a/pkg/util/locode/db/location.go +++ /dev/null @@ -1,32 +0,0 @@ -package locodedb - -import ( - "fmt" - - locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column" -) - -// LocationCode represents a location code for -// the storage in the FrostFS location database. -type LocationCode locodecolumn.LocationCode - -// LocationCodeFromString parses a string UN/LOCODE location code -// and returns a LocationCode. -func LocationCodeFromString(s string) (*LocationCode, error) { - lc, err := locodecolumn.LocationCodeFromString(s) - if err != nil { - return nil, fmt.Errorf("could not parse location code: %w", err) - } - - return LocationFromColumn(lc) -} - -// LocationFromColumn converts a UN/LOCODE country code to a LocationCode. -func LocationFromColumn(cc *locodecolumn.LocationCode) (*LocationCode, error) { - return (*LocationCode)(cc), nil -} - -func (l *LocationCode) String() string { - syms := (*locodecolumn.LocationCode)(l).Symbols() - return string(syms[:]) -} diff --git a/pkg/util/locode/db/point.go b/pkg/util/locode/db/point.go deleted file mode 100644 index 72daebb2c..000000000 --- a/pkg/util/locode/db/point.go +++ /dev/null @@ -1,93 +0,0 @@ -package locodedb - -import ( - "fmt" - "strconv" - - locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column" -) - -// Point represents a 2D geographic point. -type Point struct { - lat, lng float64 -} - -// NewPoint creates, initializes and returns a new Point. -func NewPoint(lat, lng float64) *Point { - return &Point{ - lat: lat, - lng: lng, - } -} - -// Latitude returns the Point's latitude. -func (p Point) Latitude() float64 { - return p.lat -} - -// Longitude returns the Point's longitude. -func (p Point) Longitude() float64 { - return p.lng -} - -// PointFromCoordinates converts a UN/LOCODE coordinates to a Point. -func PointFromCoordinates(crd *locodecolumn.Coordinates) (*Point, error) { - if crd == nil { - return nil, nil - } - - cLat := crd.Latitude() - cLatDeg := cLat.Degrees() - cLatMnt := cLat.Minutes() - - lat, err := toDecimal(cLatDeg[:], cLatMnt[:]) - if err != nil { - return nil, fmt.Errorf("could not parse latitude: %w", err) - } - - if !cLat.Hemisphere().North() { - lat = -lat - } - - cLng := crd.Longitude() - cLngDeg := cLng.Degrees() - cLngMnt := cLng.Minutes() - - lng, err := toDecimal(cLngDeg[:], cLngMnt[:]) - if err != nil { - return nil, fmt.Errorf("could not parse longitude: %w", err) - } - - if !cLng.Hemisphere().East() { - lng = -lng - } - - return &Point{ - lat: lat, - lng: lng, - }, nil -} - -func toDecimal(intRaw, minutesRaw []byte) (float64, error) { - integer, err := strconv.ParseFloat(string(intRaw), 64) - if err != nil { - return 0, fmt.Errorf("could not parse integer part: %w", err) - } - - decimal, err := minutesToDegrees(minutesRaw) - if err != nil { - return 0, fmt.Errorf("could not parse decimal part: %w", err) - } - - return integer + decimal, nil -} - -// minutesToDegrees converts minutes to decimal part of a degree. 
-func minutesToDegrees(raw []byte) (float64, error) { - minutes, err := strconv.ParseFloat(string(raw), 64) - if err != nil { - return 0, err - } - - return minutes / 60, nil -} diff --git a/pkg/util/locode/db/point_test.go b/pkg/util/locode/db/point_test.go deleted file mode 100644 index f91c0cf87..000000000 --- a/pkg/util/locode/db/point_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package locodedb - -import ( - "testing" - - locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column" - "github.com/stretchr/testify/require" -) - -func TestPointFromCoordinates(t *testing.T) { - testCases := []struct { - latGot, longGot string - latWant, longWant float64 - }{ - { - latGot: "5915N", - longGot: "01806E", - latWant: 59.25, - longWant: 18.10, - }, - { - latGot: "1000N", - longGot: "02030E", - latWant: 10.00, - longWant: 20.50, - }, - { - latGot: "0145S", - longGot: "03512W", - latWant: -01.75, - longWant: -35.20, - }, - } - - var ( - crd *locodecolumn.Coordinates - point *Point - err error - ) - - for _, test := range testCases { - crd, err = locodecolumn.CoordinatesFromString(test.latGot + " " + test.longGot) - require.NoError(t, err) - - point, err = PointFromCoordinates(crd) - require.NoError(t, err) - - require.Equal(t, test.latWant, point.Latitude()) - require.Equal(t, test.longWant, point.Longitude()) - } -} diff --git a/pkg/util/locode/db/record.go b/pkg/util/locode/db/record.go deleted file mode 100644 index 4c414079f..000000000 --- a/pkg/util/locode/db/record.go +++ /dev/null @@ -1,140 +0,0 @@ -package locodedb - -import ( - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" - locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column" -) - -// Key represents the key in FrostFS location database. -type Key struct { - cc *CountryCode - - lc *LocationCode -} - -// NewKey calculates Key from LOCODE. -func NewKey(lc locode.LOCODE) (*Key, error) { - country, err := CountryCodeFromString(lc.CountryCode()) - if err != nil { - return nil, fmt.Errorf("could not parse country: %w", err) - } - - location, err := LocationCodeFromString(lc.LocationCode()) - if err != nil { - return nil, fmt.Errorf("could not parse location: %w", err) - } - - return &Key{ - cc: country, - lc: location, - }, nil -} - -// CountryCode returns the location's country code. -func (k *Key) CountryCode() *CountryCode { - return k.cc -} - -// LocationCode returns the location code. -func (k *Key) LocationCode() *LocationCode { - return k.lc -} - -// Record represents the entry in FrostFS location database. -type Record struct { - countryName string - - locationName string - - subDivName string - - subDivCode string - - p *Point - - cont *Continent -} - -var errParseCoordinates = errors.New("invalid coordinates") - -// NewRecord calculates the Record from the UN/LOCODE table record. -func NewRecord(r locode.Record) (*Record, error) { - crd, err := locodecolumn.CoordinatesFromString(r.Coordinates) - if err != nil { - return nil, fmt.Errorf("%w: %v", errParseCoordinates, err) - } - - point, err := PointFromCoordinates(crd) - if err != nil { - return nil, fmt.Errorf("could not parse geo point: %w", err) - } - - return &Record{ - locationName: r.NameWoDiacritics, - subDivCode: r.SubDiv, - p: point, - }, nil -} - -// CountryName returns the country name. -func (r *Record) CountryName() string { - return r.countryName -} - -// SetCountryName sets the country name. 
-func (r *Record) SetCountryName(name string) { - r.countryName = name -} - -// LocationName returns the location name. -func (r *Record) LocationName() string { - return r.locationName -} - -// SetLocationName sets the location name. -func (r *Record) SetLocationName(name string) { - r.locationName = name -} - -// SubDivCode returns the subdivision code. -func (r *Record) SubDivCode() string { - return r.subDivCode -} - -// SetSubDivCode sets the subdivision code. -func (r *Record) SetSubDivCode(name string) { - r.subDivCode = name -} - -// SubDivName returns the subdivision name. -func (r *Record) SubDivName() string { - return r.subDivName -} - -// SetSubDivName sets the subdivision name. -func (r *Record) SetSubDivName(name string) { - r.subDivName = name -} - -// GeoPoint returns geo point of the location. -func (r *Record) GeoPoint() *Point { - return r.p -} - -// SetGeoPoint sets geo point of the location. -func (r *Record) SetGeoPoint(p *Point) { - r.p = p -} - -// Continent returns the location continent. -func (r *Record) Continent() *Continent { - return r.cont -} - -// SetContinent sets the location continent. -func (r *Record) SetContinent(c *Continent) { - r.cont = c -} diff --git a/pkg/util/locode/record.go b/pkg/util/locode/record.go deleted file mode 100644 index 7db746ff3..000000000 --- a/pkg/util/locode/record.go +++ /dev/null @@ -1,83 +0,0 @@ -package locode - -import ( - "errors" - "fmt" - "strings" -) - -// LOCODE represents code from UN/LOCODE coding scheme. -type LOCODE [2]string - -// Record represents a single record of the UN/LOCODE table. -type Record struct { - // Change Indicator. - Ch string - - // Combination of a 2-character country code and a 3-character location code. - LOCODE LOCODE - - // Name of the locations which has been allocated a UN/LOCODE. - Name string - - // Names of the locations which have been allocated a UN/LOCODE without diacritic signs. - NameWoDiacritics string - - // ISO 1-3 character alphabetic and/or numeric code for the administrative division of the country concerned. - SubDiv string - - // 8-digit function classifier code for the location. - Function string - - // Status of the entry by a 2-character code. - Status string - - // Last date when the location was updated/entered. - Date string - - // The IATA code for the location if different from location code in column LOCODE. - IATA string - - // Geographical coordinates (latitude/longitude) of the location, if there is any. - Coordinates string - - // Some general remarks regarding the UN/LOCODE in question. - Remarks string -} - -// ErrInvalidString is the error of incorrect string format of the LOCODE. -var ErrInvalidString = errors.New("invalid string format in UN/Locode") - -// FromString parses string and returns LOCODE. -// -// If string has incorrect format, ErrInvalidString returns. -func FromString(s string) (*LOCODE, error) { - const ( - locationSeparator = " " - locodePartsNumber = 2 - ) - - words := strings.Split(s, locationSeparator) - if ln := len(words); ln != locodePartsNumber { - return nil, fmt.Errorf( - "incorrect locode: it must consist of %d codes separated with a witespase, got: %d", - locodePartsNumber, - ln, - ) - } - - l := new(LOCODE) - copy(l[:], words) - - return l, nil -} - -// CountryCode returns a string representation of country code. -func (l *LOCODE) CountryCode() string { - return l[0] -} - -// LocationCode returns a string representation of location code. 
-func (l *LOCODE) LocationCode() string { - return l[1] -} diff --git a/pkg/util/locode/table/csv/calls.go b/pkg/util/locode/table/csv/calls.go deleted file mode 100644 index 5f40865be..000000000 --- a/pkg/util/locode/table/csv/calls.go +++ /dev/null @@ -1,156 +0,0 @@ -package csvlocode - -import ( - "encoding/csv" - "errors" - "io" - "os" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db" -) - -var errInvalidRecord = errors.New("invalid table record") - -// IterateAll scans a table record one-by-one, parses a UN/LOCODE record -// from it and passes it to f. -// -// Returns f's errors directly. -func (t *Table) IterateAll(f func(locode.Record) error) error { - const wordsPerRecord = 12 - - return t.scanWords(t.paths, wordsPerRecord, func(words []string) error { - lc, err := locode.FromString(strings.Join(words[1:3], " ")) - if err != nil { - return err - } - - record := locode.Record{ - Ch: words[0], - LOCODE: *lc, - Name: words[3], - NameWoDiacritics: words[4], - SubDiv: words[5], - Function: words[6], - Status: words[7], - Date: words[8], - IATA: words[9], - Coordinates: words[10], - Remarks: words[11], - } - - return f(record) - }) -} - -const ( - _ = iota - 1 - - subDivCountry - subDivSubdivision - subDivName - _ // subDivLevel - - subDivFldNum -) - -type subDivKey struct { - countryCode, - subDivCode string -} - -type subDivRecord struct { - name string -} - -// SubDivName scans a table record to an in-memory table (once), -// and returns the subdivision name of the country and the subdivision codes match. -// -// Returns locodedb.ErrSubDivNotFound if no entry matches. -func (t *Table) SubDivName(countryCode *locodedb.CountryCode, code string) (string, error) { - if err := t.initSubDiv(); err != nil { - return "", err - } - - rec, ok := t.mSubDiv[subDivKey{ - countryCode: countryCode.String(), - subDivCode: code, - }] - if !ok { - return "", locodedb.ErrSubDivNotFound - } - - return rec.name, nil -} - -func (t *Table) initSubDiv() (err error) { - t.subDivOnce.Do(func() { - t.mSubDiv = make(map[subDivKey]subDivRecord) - - err = t.scanWords([]string{t.subDivPath}, subDivFldNum, func(words []string) error { - t.mSubDiv[subDivKey{ - countryCode: words[subDivCountry], - subDivCode: words[subDivSubdivision], - }] = subDivRecord{ - name: words[subDivName], - } - - return nil - }) - }) - - return -} - -var errScanInt = errors.New("interrupt scan") - -func (t *Table) scanWords(paths []string, fpr int, wordsHandler func([]string) error) error { - var ( - rdrs = make([]io.Reader, 0, len(t.paths)) - closers = make([]io.Closer, 0, len(t.paths)) - ) - - for i := range paths { - file, err := os.OpenFile(paths[i], os.O_RDONLY, t.mode) - if err != nil { - return err - } - - rdrs = append(rdrs, file) - closers = append(closers, file) - } - - defer func() { - for i := range closers { - _ = closers[i].Close() - } - }() - - r := csv.NewReader(io.MultiReader(rdrs...)) - r.ReuseRecord = true - r.FieldsPerRecord = fpr - - for { - words, err := r.Read() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - - return err - } else if len(words) != fpr { - return errInvalidRecord - } - - if err := wordsHandler(words); err != nil { - if errors.Is(err, errScanInt) { - break - } - - return err - } - } - - return nil -} diff --git a/pkg/util/locode/table/csv/opts.go b/pkg/util/locode/table/csv/opts.go deleted file mode 100644 index 68e442899..000000000 --- a/pkg/util/locode/table/csv/opts.go +++ /dev/null @@ 
-1,28 +0,0 @@ -package csvlocode - -import ( - "io/fs" -) - -// Option sets an optional parameter of Table. -type Option func(*options) - -type options struct { - mode fs.FileMode - - extraPaths []string -} - -func defaultOpts() *options { - return &options{ - mode: 0o700, - } -} - -// WithExtraPaths returns an option to add extra paths -// to UN/LOCODE tables in csv format. -func WithExtraPaths(ps ...string) Option { - return func(o *options) { - o.extraPaths = append(o.extraPaths, ps...) - } -} diff --git a/pkg/util/locode/table/csv/table.go b/pkg/util/locode/table/csv/table.go deleted file mode 100644 index b84c2b705..000000000 --- a/pkg/util/locode/table/csv/table.go +++ /dev/null @@ -1,75 +0,0 @@ -package csvlocode - -import ( - "fmt" - "io/fs" - "sync" -) - -// Prm groups the required parameters of the Table's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Path to UN/LOCODE csv table. - // - // Must not be empty. - Path string - - // Path to csv table of UN/LOCODE Subdivisions. - // - // Must not be empty. - SubDivPath string -} - -// Table is a descriptor of the UN/LOCODE table in csv format. -// -// For correct operation, Table must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// The Table is immediately ready to work through API. -type Table struct { - paths []string - - mode fs.FileMode - - subDivPath string - - subDivOnce sync.Once - - mSubDiv map[subDivKey]subDivRecord -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the Table. -// -// Panics if at least one value of the parameters is invalid. -// -// The created Table does not require additional -// initialization and is completely ready for work. -func New(prm Prm, opts ...Option) *Table { - switch { - case prm.Path == "": - panicOnPrmValue("Path", prm.Path) - case prm.SubDivPath == "": - panicOnPrmValue("SubDivPath", prm.SubDivPath) - } - - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - return &Table{ - paths: append(o.extraPaths, prm.Path), - mode: o.mode, - subDivPath: prm.SubDivPath, - } -} diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go new file mode 100644 index 000000000..413b1d9aa --- /dev/null +++ b/pkg/util/logger/log.go @@ -0,0 +1,35 @@ +package logger + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "go.uber.org/zap" +) + +func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) { + l.z.Debug(msg, appendContext(ctx, fields...)...) +} + +func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) { + l.z.Info(msg, appendContext(ctx, fields...)...) +} + +func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) { + l.z.Warn(msg, appendContext(ctx, fields...)...) +} + +func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) { + l.z.Error(msg, appendContext(ctx, fields...)...) 
+}
+
+func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
+	if traceID := tracing.GetTraceID(ctx); traceID != "" {
+		fields = append(fields, zap.String("trace_id", traceID))
+	}
+	if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined {
+		fields = append(fields, zap.String("io_tag", ioTag))
+	}
+	return fields
+}
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index e67afb36b..a1998cb1a 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -2,6 +2,7 @@ package logger
 
 import (
 	"fmt"
+	"time"
 
 	"git.frostfs.info/TrueCloudLab/zapjournald"
 	"github.com/ssgreg/journald"
@@ -12,8 +13,10 @@ import (
 // Logger represents a component
 // for writing messages to log.
 type Logger struct {
-	*zap.Logger
-	lvl zap.AtomicLevel
+	z *zap.Logger
+	c zapcore.Core
+	t Tag
+	w bool
 }
 
 // Prm groups Logger's parameters.
@@ -22,16 +25,8 @@ type Logger struct {
 // Parameters that have been connected to the Logger support its
 // configuration changing.
 //
-// Passing Prm after a successful connection via the NewLogger, connects
-// the Prm to a new instance of the Logger.
-//
-// See also Reload, SetLevelString.
+// See also Logger.Reload, SetLevelString.
 type Prm struct {
-	// link to the created Logger
-	// instance; used for a runtime
-	// reconfiguration
-	_log *Logger
-
 	// support runtime rereading
 	level zapcore.Level
 
@@ -40,6 +35,15 @@ type Prm struct {
 
 	// do not support runtime rereading
 	dest string
+
+	// PrependTimestamp specifies whether to prepend a timestamp to every log message
+	PrependTimestamp bool
+
+	// Options for zap.Logger
+	Options []zap.Option
+
+	// map of tag to log level; overrides lvl for the listed tags
+	tl map[Tag]zapcore.Level
 }
 
 const (
@@ -69,20 +73,10 @@ func (p *Prm) SetDestination(d string) error {
 	return nil
 }
 
-// Reload reloads configuration of a connected instance of the Logger.
-// Returns ErrLoggerNotConnected if no connection has been performed.
-// Returns any reconfiguration error from the Logger directly.
-func (p Prm) Reload() error {
-	if p._log == nil {
-		// incorrect logger usage
-		panic("parameters are not connected to any Logger")
-	}
-
-	return p._log.reload(p)
-}
-
-func defaultPrm() *Prm {
-	return new(Prm)
+// SetTags parses the list of tags with their log levels.
+func (p *Prm) SetTags(tags [][]string) (err error) {
+	p.tl, err = parseTags(tags)
+	return err
 }
 
 // NewLogger constructs a new zap logger instance. Constructing with nil
@@ -96,10 +90,7 @@ func defaultPrm() *Prm {
 // - ISO8601 time encoding.
 //
 // Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm *Prm) (*Logger, error) {
-	if prm == nil {
-		prm = defaultPrm()
-	}
+func NewLogger(prm Prm) (*Logger, error) {
 	switch prm.dest {
 	case DestinationUndefined, DestinationStdout:
 		return newConsoleLogger(prm)
@@ -110,59 +101,143 @@ func NewLogger(prm *Prm) (*Logger, error) {
 	}
 }
 
-func newConsoleLogger(prm *Prm) (*Logger, error) {
-	lvl := zap.NewAtomicLevelAt(prm.level)
-
+func newConsoleLogger(prm Prm) (*Logger, error) {
 	c := zap.NewProductionConfig()
-	c.Level = lvl
+	c.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
 	c.Encoding = "console"
-	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
 	if prm.SamplingHook != nil {
 		c.Sampling.Hook = prm.SamplingHook
 	}
 
-	lZap, err := c.Build(
+	if prm.PrependTimestamp {
+		c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+	} else {
+		c.EncoderConfig.TimeKey = ""
+	}
+
+	opts := []zap.Option{
 		zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
-	)
+		zap.AddCallerSkip(1),
+	}
+	opts = append(opts, prm.Options...)
+	lZap, err := c.Build(opts...)
 	if err != nil {
 		return nil, err
 	}
-
-	l := &Logger{Logger: lZap, lvl: lvl}
-	prm._log = l
+	l := &Logger{z: lZap, c: lZap.Core()}
+	l = l.WithTag(TagMain)
 
 	return l, nil
 }
 
-func newJournaldLogger(prm *Prm) (*Logger, error) {
-	lvl := zap.NewAtomicLevelAt(prm.level)
-
+func newJournaldLogger(prm Prm) (*Logger, error) {
 	c := zap.NewProductionConfig()
-	c.Level = lvl
-	c.Encoding = "console"
-	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
 	if prm.SamplingHook != nil {
 		c.Sampling.Hook = prm.SamplingHook
 	}
 
+	if prm.PrependTimestamp {
+		c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+	} else {
+		c.EncoderConfig.TimeKey = ""
+	}
+
 	encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
 
-	core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
+	core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields)
 	coreWithContext := core.With([]zapcore.Field{
 		zapjournald.SyslogFacility(zapjournald.LogDaemon),
 		zapjournald.SyslogIdentifier(),
 		zapjournald.SyslogPid(),
 	})
 
-	lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
-
-	l := &Logger{Logger: lZap, lvl: lvl}
-	prm._log = l
+	var samplerOpts []zapcore.SamplerOption
+	if c.Sampling.Hook != nil {
+		samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook))
+	}
+	samplingCore := zapcore.NewSamplerWithOptions(
+		coreWithContext,
+		time.Second,
+		c.Sampling.Initial,
+		c.Sampling.Thereafter,
+		samplerOpts...,
+	)
+	opts := []zap.Option{
+		zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
+		zap.AddCallerSkip(1),
+	}
+	opts = append(opts, prm.Options...)
+	lZap := zap.New(samplingCore, opts...)
+	l := &Logger{z: lZap, c: lZap.Core()}
+	l = l.WithTag(TagMain)
 
 	return l, nil
 }
 
-func (l *Logger) reload(prm Prm) error {
-	l.lvl.SetLevel(prm.level)
-	return nil
+// With creates a child logger with the given extra fields; the parent logger is not affected.
+// Panics if the tag is unset.
+func (l *Logger) With(fields ...zap.Field) *Logger {
+	if l.t == 0 {
+		panic("tag is unset")
+	}
+	c := *l
+	c.z = l.z.With(fields...)
+	// remember that With has been called on this instance;
+	// such a logger can no longer be re-tagged via WithTag
+	c.w = true
+	return &c
+}
+
+type core struct {
+	c zapcore.Core
+	l zap.AtomicLevel
+}
+
+func (c *core) Enabled(lvl zapcore.Level) bool {
+	return c.l.Enabled(lvl)
+}
+
+func (c *core) With(fields []zapcore.Field) zapcore.Core {
+	clone := *c
+	clone.c = clone.c.With(fields)
+	return &clone
+}
+
+func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+	return c.c.Check(e, ce)
+}
+
+func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error {
+	return c.c.Write(e, fields)
+}
+
+func (c *core) Sync() error {
+	return c.c.Sync()
+}
+
+// WithTag returns a copy of the logger bound to the given tag; it is equivalent
+// to calling [NewLogger] with the same parameters for the current logger.
+// Panics if an unsupported tag is provided.
+func (l *Logger) WithTag(tag Tag) *Logger {
+	if tag == 0 || tag > Tag(len(_Tag_index)-1) {
+		panic("unsupported tag " + tag.String())
+	}
+	if l.w {
+		panic("unsupported operation for the logger's state")
+	}
+	c := *l
+	c.t = tag
+	c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
+		return &core{
+			c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
+			l: tagToLogLevel[tag],
+		}
+	}))
+	return &c
+}
+
+func NewLoggerWrapper(z *zap.Logger) *Logger {
+	return &Logger{
+		z: z.WithOptions(zap.AddCallerSkip(1)),
+		t: TagMain,
+		c: z.Core(),
+	}
 }
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
new file mode 100644
index 000000000..b867ee6cc
--- /dev/null
+++ b/pkg/util/logger/logger_test.go
@@ -0,0 +1,118 @@
+package logger
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"go.uber.org/zap/zaptest/observer"
+)
+
+func BenchmarkLogger(b *testing.B) {
+	ctx := context.Background()
+	m := map[string]Prm{}
+
+	prm := Prm{}
+	require.NoError(b, prm.SetLevelString("debug"))
+	m["logging enabled"] = prm
+
+	prm = Prm{}
+	require.NoError(b, prm.SetLevelString("error"))
+	m["logging disabled"] = prm
+
+	prm = Prm{}
+	require.NoError(b, prm.SetLevelString("error"))
+	require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
+	m["logging enabled via tags"] = prm
+
+	prm = Prm{}
+	require.NoError(b, prm.SetLevelString("debug"))
+	require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
+	m["logging disabled via tags"] = prm
+
+	for k, v := range m {
+		b.Run(k, func(b *testing.B) {
+			logger, err := createLogger(v)
+			require.NoError(b, err)
+			UpdateLevelForTags(v)
+			b.ResetTimer()
+			b.ReportAllocs()
+			for range b.N {
+				logger.Info(ctx, "test info")
+			}
+		})
+	}
+}
+
+type testCore struct {
+	core zapcore.Core
+}
+
+func (c *testCore) Enabled(lvl zapcore.Level) bool {
+	return c.core.Enabled(lvl)
+}
+
+func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
+	c.core = c.core.With(fields)
+	return c
+}
+
+func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+	return ce.AddCore(e, c)
+}
+
+func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
+	return nil
+}
+
+func (c *testCore) Sync() error {
+	return c.core.Sync()
+}
+
+func createLogger(prm Prm) (*Logger, error) {
+	prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+		tc := testCore{core: core}
+		return &tc
+	})}
+	return NewLogger(prm)
+}
+
+func TestLoggerOutput(t *testing.T) {
+	obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
+
+	prm := Prm{}
+	require.NoError(t, prm.SetLevelString("debug"))
+	prm.Options =
[]zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core { + return obs + })} + loggerMain, err := NewLogger(prm) + require.NoError(t, err) + UpdateLevelForTags(prm) + + loggerMainWith := loggerMain.With(zap.String("key", "value")) + + require.Panics(t, func() { + loggerMainWith.WithTag(TagShard) + }) + loggerShard := loggerMain.WithTag(TagShard) + loggerShard = loggerShard.With(zap.String("key1", "value1")) + + loggerMorph := loggerMain.WithTag(TagMorph) + loggerMorph = loggerMorph.With(zap.String("key2", "value2")) + + ctx := context.Background() + loggerMain.Debug(ctx, "main") + loggerMainWith.Debug(ctx, "main with") + loggerShard.Debug(ctx, "shard") + loggerMorph.Debug(ctx, "morph") + + require.Len(t, logs.All(), 4) + require.Len(t, logs.FilterFieldKey("key").All(), 1) + require.Len(t, logs.FilterFieldKey("key1").All(), 1) + require.Len(t, logs.FilterFieldKey("key2").All(), 1) + require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2) + require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1) + require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1) +} diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result new file mode 100644 index 000000000..612fa2967 --- /dev/null +++ b/pkg/util/logger/logger_test.result @@ -0,0 +1,46 @@ +goos: linux +goarch: amd64 +pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger +cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz +BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op 
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op +PASS +ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go new file mode 100644 index 000000000..1b98f2e62 --- /dev/null +++ b/pkg/util/logger/tag_string.go @@ -0,0 +1,43 @@ +// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT. + +package logger + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TagMain-1] + _ = x[TagMorph-2] + _ = x[TagGrpcSvc-3] + _ = x[TagIr-4] + _ = x[TagProcessor-5] + _ = x[TagEngine-6] + _ = x[TagBlobovnicza-7] + _ = x[TagBlobovniczaTree-8] + _ = x[TagBlobstor-9] + _ = x[TagFSTree-10] + _ = x[TagGC-11] + _ = x[TagShard-12] + _ = x[TagWriteCache-13] + _ = x[TagDeleteSvc-14] + _ = x[TagGetSvc-15] + _ = x[TagSearchSvc-16] + _ = x[TagSessionSvc-17] + _ = x[TagTreeSvc-18] + _ = x[TagPolicer-19] + _ = x[TagReplicator-20] +} + +const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator" + +var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148} + +func (i Tag) String() string { + i -= 1 + if i >= Tag(len(_Tag_index)-1) { + return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _Tag_name[_Tag_index[i]:_Tag_index[i+1]] +} diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go new file mode 100644 index 000000000..a5386707e --- /dev/null +++ b/pkg/util/logger/tags.go @@ -0,0 +1,94 @@ +package logger + +import ( + "fmt" + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +//go:generate stringer -type Tag -linecomment + +type Tag uint8 + +const ( + _ Tag = iota // + TagMain // main + TagMorph // morph + TagGrpcSvc // grpcsvc + TagIr // ir + TagProcessor // processor + TagEngine // engine + TagBlobovnicza // blobovnicza + TagBlobovniczaTree // blobovniczatree + TagBlobstor // blobstor + TagFSTree // fstree + TagGC // gc + TagShard // shard + TagWriteCache // writecache + TagDeleteSvc // deletesvc + TagGetSvc // getsvc + TagSearchSvc // searchsvc + TagSessionSvc // sessionsvc + TagTreeSvc // treesvc + TagPolicer // policer + TagReplicator // replicator + + defaultLevel = zapcore.InfoLevel +) + +var ( + tagToLogLevel = map[Tag]zap.AtomicLevel{} + stringToTag = 
map[string]Tag{}
+)
+
+func init() {
+	for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
+		tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
+		stringToTag[i.String()] = i
+	}
+}
+
+// parseTags returns:
+// - a map (always instantiated) of tag to the custom log level for that tag;
+// - an error if one occurred (the map is empty in that case).
+func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
+	m := make(map[Tag]zapcore.Level)
+	if len(raw) == 0 {
+		return m, nil
+	}
+	for _, item := range raw {
+		str, level := item[0], item[1]
+		if len(level) == 0 {
+			// It is not necessary to parse tags without a level,
+			// because the default log level will be used then.
+			continue
+		}
+		var l zapcore.Level
+		err := l.UnmarshalText([]byte(level))
+		if err != nil {
+			return nil, err
+		}
+		tmp := strings.Split(str, ",")
+		for _, tagStr := range tmp {
+			tag, ok := stringToTag[strings.TrimSpace(tagStr)]
+			if !ok {
+				return nil, fmt.Errorf("unsupported tag %s", tagStr)
+			}
+			m[tag] = l
+		}
+	}
+	return m, nil
+}
+
+// UpdateLevelForTags applies the levels parsed from prm to the global per-tag
+// atomic levels: tags configured in prm get their own level, all other tags
+// fall back to the base level of prm.
+func UpdateLevelForTags(prm Prm) {
+	for k, v := range tagToLogLevel {
+		nk, ok := prm.tl[k]
+		if ok {
+			v.SetLevel(nk)
+		} else {
+			v.SetLevel(prm.level)
+		}
+	}
+}
diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go
index f93756d17..b5b0a31eb 100644
--- a/pkg/util/logger/test/logger.go
+++ b/pkg/util/logger/test/logger.go
@@ -11,9 +11,10 @@ import (
 
 // NewLogger creates a new logger.
 func NewLogger(t testing.TB) *logger.Logger {
-	var l logger.Logger
-	l.Logger = zaptest.NewLogger(t,
-		zaptest.Level(zapcore.DebugLevel),
-		zaptest.WrapOptions(zap.Development(), zap.AddCaller()))
-	return &l
+	return logger.NewLoggerWrapper(
+		zaptest.NewLogger(t,
+			zaptest.Level(zapcore.DebugLevel),
+			zaptest.WrapOptions(zap.Development(), zap.AddCaller()),
+		),
+	)
 }
diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go
index 97508f82a..a06296a07 100644
--- a/pkg/util/rand/rand.go
+++ b/pkg/util/rand/rand.go
@@ -13,7 +13,7 @@ func Uint64() uint64 {
 	return source.Uint64()
 }
 
-// Uint64 returns a random uint32 value.
+// Uint32 returns a random uint32 value.
 func Uint32() uint32 {
 	return source.Uint32()
 }
diff --git a/pkg/util/sdnotify/clock.go b/pkg/util/sdnotify/clock.go
deleted file mode 100644
index f5419d027..000000000
--- a/pkg/util/sdnotify/clock.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package sdnotify
-
-import (
-	// For go:linkname to work.
-	_ "unsafe"
-)
-
-//go:noescape
-//go:linkname nanotime runtime.nanotime
-func nanotime() int64
diff --git a/pkg/util/sdnotify/clock.s b/pkg/util/sdnotify/clock.s
deleted file mode 100644
index ad033ff4f..000000000
--- a/pkg/util/sdnotify/clock.s
+++ /dev/null
@@ -1,2 +0,0 @@
-// The file is intentionally empty.
-// It is a workaround for https://github.com/golang/go/issues/15006
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index 16a3f11c1..bd15d0e8f 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -5,7 +5,10 @@ import (
 	"fmt"
 	"net"
 	"os"
+	"strconv"
 	"strings"
+
+	"golang.org/x/sys/unix"
 )
 
 const (
@@ -21,7 +24,7 @@ var (
 	errSocketIsNotInitialized = errors.New("socket is not initialized")
 )
 
-// Initializes socket with provided name of
+// InitSocket initializes socket with provided name of
 // environment variable.
 func InitSocket() error {
 	notifySocket := os.Getenv("NOTIFY_SOCKET")
@@ -51,7 +54,13 @@ func FlagAndStatus(status string) error {
 		// must be sent, containing "READY=1".
// // For MONOTONIC_USEC format refer to https://www.man7.org/linux/man-pages/man3/sd_notify.3.html - status += fmt.Sprintf("\nMONOTONIC_USEC=%d", uint64(nanotime())/1e3 /* microseconds in nanoseconds */) + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil { + return fmt.Errorf("clock_gettime: %w", err) + } + status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10) + status += "\nSTATUS=RELOADING" + return Send(status) } status += "\nSTATUS=" + strings.TrimSuffix(status, "=1") return Send(status) @@ -62,6 +71,11 @@ func Status(status string) error { return Send("STATUS=" + status) } +// ClearStatus resets the current service status previously set by Status. +func ClearStatus() error { + return Status("") +} + // Send state through the notify socket if any. // If the notify socket was not detected, it returns an error. func Send(state string) error { diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go index 3b3e6a694..259064ecf 100644 --- a/pkg/util/sync/key_locker_test.go +++ b/pkg/util/sync/key_locker_test.go @@ -13,7 +13,7 @@ func TestKeyLocker(t *testing.T) { taken := false eg, _ := errgroup.WithContext(context.Background()) keyLocker := NewKeyLocker[int]() - for i := 0; i < 100; i++ { + for range 100 { eg.Go(func() error { keyLocker.Lock(0) defer keyLocker.Unlock(0) diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go new file mode 100644 index 000000000..7373e538f --- /dev/null +++ b/pkg/util/testing/netmap_source.go @@ -0,0 +1,36 @@ +package testing + +import ( + "context" + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +var ( + errInvalidDiff = errors.New("invalid diff") + errNetmapNotFound = errors.New("netmap not found") +) + +type TestNetmapSource struct { + Netmaps map[uint64]*netmap.NetMap + CurrentEpoch uint64 +} + +func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { + if diff >= s.CurrentEpoch { + return nil, errInvalidDiff + } + return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff) +} + +func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) { + if nm, found := s.Netmaps[epoch]; found { + return nm, nil + } + return nil, errNetmapNotFound +} + +func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) { + return s.CurrentEpoch, nil +} diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go new file mode 100644 index 000000000..39a420358 --- /dev/null +++ b/scripts/populate-metabase/internal/generate.go @@ -0,0 +1,133 @@ +package internal + +import ( + cryptorand "crypto/rand" + "crypto/sha256" + "fmt" + "math/rand" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" + "git.frostfs.info/TrueCloudLab/tzhash/tz" +) + +func GeneratePayloadPool(count uint, size uint) [][]byte { + var pool [][]byte + for range count { + payload := make([]byte, size) + _, _ = 
cryptorand.Read(payload) + + pool = append(pool, payload) + } + return pool +} + +func GenerateAttributePool(count uint) []objectSDK.Attribute { + var pool []objectSDK.Attribute + for i := range count { + for j := range count { + attr := *objectSDK.NewAttribute() + attr.SetKey(fmt.Sprintf("key%d", i)) + attr.SetValue(fmt.Sprintf("value%d", j)) + pool = append(pool, attr) + } + } + return pool +} + +func GenerateOwnerPool(count uint) []user.ID { + var pool []user.ID + for range count { + pool = append(pool, usertest.ID()) + } + return pool +} + +type ObjectOption func(obj *objectSDK.Object) + +func GenerateObject(options ...ObjectOption) *objectSDK.Object { + var ver version.Version + ver.SetMajor(2) + ver.SetMinor(1) + + payload := make([]byte, 0) + + var csum checksum.Checksum + csum.SetSHA256(sha256.Sum256(payload)) + + var csumTZ checksum.Checksum + csumTZ.SetTillichZemor(tz.Sum(csum.Value())) + + obj := objectSDK.New() + obj.SetID(oidtest.ID()) + obj.SetOwnerID(usertest.ID()) + obj.SetContainerID(cidtest.ID()) + + header := objecttest.Object().GetECHeader() + header.SetParent(oidtest.ID()) + obj.SetECHeader(header) + + obj.SetVersion(&ver) + obj.SetPayload(payload) + obj.SetPayloadSize(uint64(len(payload))) + obj.SetPayloadChecksum(csum) + obj.SetPayloadHomomorphicHash(csumTZ) + + for _, option := range options { + option(obj) + } + + return obj +} + +func WithContainerID(cid cid.ID) ObjectOption { + return func(obj *objectSDK.Object) { + obj.SetContainerID(cid) + } +} + +func WithType(typ objectSDK.Type) ObjectOption { + return func(obj *objectSDK.Object) { + obj.SetType(typ) + } +} + +func WithPayloadFromPool(pool [][]byte) ObjectOption { + payload := pool[rand.Intn(len(pool))] + + var csum checksum.Checksum + csum.SetSHA256(sha256.Sum256(payload)) + + var csumTZ checksum.Checksum + csumTZ.SetTillichZemor(tz.Sum(csum.Value())) + + return func(obj *objectSDK.Object) { + obj.SetPayload(payload) + obj.SetPayloadSize(uint64(len(payload))) + obj.SetPayloadChecksum(csum) + obj.SetPayloadHomomorphicHash(csumTZ) + } +} + +func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption { + return func(obj *objectSDK.Object) { + var attrs []objectSDK.Attribute + for range count { + attrs = append(attrs, pool[rand.Intn(len(pool))]) + } + obj.SetAttributes(attrs...) 
+ } +} + +func WithOwnerIDFromPool(pool []user.ID) ObjectOption { + return func(obj *objectSDK.Object) { + obj.SetOwnerID(pool[rand.Intn(len(pool))]) + } +} diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go new file mode 100644 index 000000000..fafe61eaa --- /dev/null +++ b/scripts/populate-metabase/internal/populate.go @@ -0,0 +1,260 @@ +package internal + +import ( + "context" + "fmt" + "math/rand" + "sync" + + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "golang.org/x/sync/errgroup" +) + +type EpochState struct{} + +func (s EpochState) CurrentEpoch() uint64 { + return 0 +} + +func PopulateWithObjects( + ctx context.Context, + db *meta.DB, + group *errgroup.Group, + count uint, + factory func() *objectSDK.Object, +) { + digits := "0123456789" + + for range count { + obj := factory() + id := fmt.Appendf(nil, "%c/%c/%c", + digits[rand.Int()%len(digits)], + digits[rand.Int()%len(digits)], + digits[rand.Int()%len(digits)]) + + prm := meta.PutPrm{} + prm.SetObject(obj) + prm.SetStorageID(id) + + group.Go(func() error { + if _, err := db.Put(ctx, prm); err != nil { + return fmt.Errorf("couldn't put an object: %w", err) + } + return nil + }) + } +} + +func PopulateWithBigObjects( + ctx context.Context, + db *meta.DB, + group *errgroup.Group, + count uint, + factory func() *objectSDK.Object, +) { + for range count { + group.Go(func() error { + if err := populateWithBigObject(ctx, db, factory); err != nil { + return fmt.Errorf("couldn't put a big object: %w", err) + } + return nil + }) + } +} + +func populateWithBigObject( + ctx context.Context, + db *meta.DB, + factory func() *objectSDK.Object, +) error { + t := &target{db: db} + + pk, _ := keys.NewPrivateKey() + p := transformer.NewPayloadSizeLimiter(transformer.Params{ + Key: &pk.PrivateKey, + NextTargetInit: func() transformer.ObjectWriter { return t }, + NetworkState: EpochState{}, + MaxSize: 10, + }) + + obj := factory() + payload := make([]byte, 30) + + err := p.WriteHeader(ctx, obj) + if err != nil { + return err + } + + _, err = p.Write(ctx, payload) + if err != nil { + return err + } + + _, err = p.Close(ctx) + if err != nil { + return err + } + + return nil +} + +type target struct { + db *meta.DB +} + +func (t *target) WriteObject(ctx context.Context, obj *objectSDK.Object) error { + prm := meta.PutPrm{} + prm.SetObject(obj) + + _, err := t.db.Put(ctx, prm) + return err +} + +func PopulateGraveyard( + ctx context.Context, + db *meta.DB, + group *errgroup.Group, + workBufferSize int, + count uint, + factory func() *objectSDK.Object, +) { + ts := factory() + ts.SetType(objectSDK.TypeTombstone) + + prm := meta.PutPrm{} + prm.SetObject(ts) + + group.Go(func() error { + if _, err := db.Put(ctx, prm); err != nil { + return fmt.Errorf("couldn't put a tombstone object: %w", err) + } + return nil + }) + + cID, _ := ts.ContainerID() + oID, _ := ts.ID() + + var tsAddr oid.Address + + tsAddr.SetContainer(cID) + tsAddr.SetObject(oID) + + addrs := make(chan oid.Address, workBufferSize) + + go func() { + defer close(addrs) + + wg := &sync.WaitGroup{} + wg.Add(int(count)) + + for range count { + obj := factory() + + prm := meta.PutPrm{} + prm.SetObject(obj) + + group.Go(func() error { + defer wg.Done() + + if _, 
err := db.Put(ctx, prm); err != nil {
+					return fmt.Errorf("couldn't put an object: %w", err)
+				}
+
+				cID, _ := obj.ContainerID()
+				oID, _ := obj.ID()
+
+				var addr oid.Address
+				addr.SetContainer(cID)
+				addr.SetObject(oID)
+
+				addrs <- addr
+				return nil
+			})
+		}
+		wg.Wait()
+	}()
+
+	go func() {
+		for addr := range addrs {
+			prm := meta.InhumePrm{}
+			prm.SetAddresses(addr)
+			prm.SetTombstoneAddress(tsAddr)
+
+			group.Go(func() error {
+				if _, err := db.Inhume(ctx, prm); err != nil {
+					return fmt.Errorf("couldn't inhume an object: %w", err)
+				}
+				return nil
+			})
+		}
+	}()
+}
+
+func PopulateLocked(
+	ctx context.Context,
+	db *meta.DB,
+	group *errgroup.Group,
+	workBufferSize int,
+	count uint,
+	factory func() *objectSDK.Object,
+) {
+	locker := factory()
+	locker.SetType(objectSDK.TypeLock)
+
+	prm := meta.PutPrm{}
+	prm.SetObject(locker)
+
+	group.Go(func() error {
+		if _, err := db.Put(ctx, prm); err != nil {
+			return fmt.Errorf("couldn't put a locker object: %w", err)
+		}
+		return nil
+	})
+
+	ids := make(chan oid.ID, workBufferSize)
+
+	go func() {
+		defer close(ids)
+
+		wg := &sync.WaitGroup{}
+		wg.Add(int(count))
+
+		for range count {
+			obj := factory()
+
+			prm := meta.PutPrm{}
+			prm.SetObject(obj)
+
+			group.Go(func() error {
+				// Done must be deferred inside the task, as in PopulateGraveyard:
+				// deferring it in the loop body would only fire when the enclosing
+				// goroutine returns, which never happens before wg.Wait.
+				defer wg.Done()
+
+				if _, err := db.Put(ctx, prm); err != nil {
+					return fmt.Errorf("couldn't put an object: %w", err)
+				}
+
+				id, _ := obj.ID()
+				ids <- id
+				return nil
+			})
+		}
+		wg.Wait()
+	}()
+
+	go func() {
+		for id := range ids {
+			lockerCID, _ := locker.ContainerID()
+			lockerOID, _ := locker.ID()
+
+			group.Go(func() error {
+				if err := db.Lock(ctx, lockerCID, lockerOID, []oid.ID{id}); err != nil {
+					return fmt.Errorf("couldn't lock an object: %w", err)
+				}
+				return nil
+			})
+		}
+	}()
+}
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
new file mode 100644
index 000000000..8c4ea41ad
--- /dev/null
+++ b/scripts/populate-metabase/main.go
@@ -0,0 +1,159 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+
+	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/scripts/populate-metabase/internal"
+	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	"golang.org/x/sync/errgroup"
+)
+
+var (
+	path  string
+	force bool
+	jobs  uint
+
+	numContainers,
+	numObjects,
+	numAttributesPerObj,
+	numOwners,
+	numPayloads,
+	numAttributes uint
+)
+
+func main() {
+	flag.StringVar(&path, "path", "", "Path to metabase")
+	flag.BoolVar(&force, "force", false, "Rewrite existing database")
+	flag.UintVar(&jobs, "j", 10000, "Number of jobs to run")
+
+	flag.UintVar(&numContainers, "containers", 0, "Number of containers to be created")
+	flag.UintVar(&numObjects, "objects", 0, "Number of objects per container")
+	flag.UintVar(&numAttributesPerObj, "attributes", 0, "Number of attributes per object")
+
+	flag.UintVar(&numOwners, "distinct-owners", 10, "Number of distinct owners to be used")
+	flag.UintVar(&numPayloads, "distinct-payloads", 10, "Number of distinct payloads to be used")
+	flag.UintVar(&numAttributes, "distinct-attributes", 10, "Number of distinct attributes to be used")
+
+	flag.Parse()
+
+	exitIf(numPayloads == 0, "must have payloads\n")
+	exitIf(numAttributes == 0, "must have attributes\n")
+	exitIf(numOwners == 0, "must have owners\n")
+	exitIf(len(path) == 0, "path to metabase not specified\n")
+	exitIf(
+		numAttributesPerObj > numAttributes,
+		"object can't have more attributes than available\n",
+	)
+
+	info, err := os.Stat(path)
+	exitIf(
+		err != nil && !errors.Is(err, os.ErrNotExist),
+		"couldn't get path info: %s\n", err,
+	)
+
+	// Path exists.
+	if err == nil {
+		exitIf(info.IsDir(), "path is a directory\n")
+		exitIf(!force, "couldn't rewrite existing file, use '-force' flag\n")
+
+		err = os.Remove(path)
+		exitIf(err != nil, "couldn't remove existing file: %s\n", err)
+	}
+
+	err = populate()
+	exitIf(err != nil, "couldn't populate the metabase: %s\n", err)
+}
+
+func getObjectFactory(opts ...internal.ObjectOption) func() *objectSDK.Object {
+	return func() *objectSDK.Object {
+		return internal.GenerateObject(opts...)
+	}
+}
+
+func populate() (err error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	db := meta.New([]meta.Option{
+		meta.WithPath(path),
+		meta.WithPermissions(0o600),
+		meta.WithEpochState(internal.EpochState{}),
+	}...)
+
+	if err = db.Open(ctx, mode.ReadWrite); err != nil {
+		return fmt.Errorf("couldn't open the metabase: %w", err)
+	}
+	defer func() {
+		// Close must be called exactly once; wrap the error already obtained
+		// instead of closing the metabase a second time.
+		if errOnClose := db.Close(ctx); errOnClose != nil {
+			err = errors.Join(
+				err,
+				fmt.Errorf("couldn't close the metabase: %w", errOnClose),
+			)
+		}
+	}()
+
+	if err = db.Init(ctx); err != nil {
+		return fmt.Errorf("couldn't init the metabase: %w", err)
+	}
+
+	payloads := internal.GeneratePayloadPool(numPayloads, 32)
+	attributes := internal.GenerateAttributePool(numAttributes)
+	owners := internal.GenerateOwnerPool(numOwners)
+
+	types := []objectSDK.Type{
+		objectSDK.TypeRegular,
+		objectSDK.TypeLock,
+		objectSDK.TypeTombstone,
+	}
+
+	eg, ctx := errgroup.WithContext(ctx)
+	eg.SetLimit(int(jobs))
+
+	for range numContainers {
+		cid := cidtest.ID()
+
+		for _, typ := range types {
+			internal.PopulateWithObjects(ctx, db, eg, numObjects, getObjectFactory(
+				internal.WithContainerID(cid),
+				internal.WithType(typ),
+				internal.WithPayloadFromPool(payloads),
+				internal.WithOwnerIDFromPool(owners),
+				internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			))
+		}
+		internal.PopulateWithBigObjects(ctx, db, eg, numObjects, getObjectFactory(
+			internal.WithContainerID(cid),
+			internal.WithType(objectSDK.TypeRegular),
+			internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			internal.WithOwnerIDFromPool(owners),
+		))
+		internal.PopulateGraveyard(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+			internal.WithContainerID(cid),
+			internal.WithType(objectSDK.TypeRegular),
+			internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			internal.WithOwnerIDFromPool(owners),
+		))
+		internal.PopulateLocked(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+			internal.WithContainerID(cid),
+			internal.WithType(objectSDK.TypeRegular),
+			internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			internal.WithOwnerIDFromPool(owners),
+		))
+	}
+
+	return eg.Wait()
+}
+
+func exitIf(cond bool, format string, args ...any) {
+	if cond {
+		fmt.Fprintf(os.Stderr, format, args...)
+		os.Exit(1)
+	}
+}